Commit 5d267ad · 1 parent: dd39d84 · initial deployment
Files changed:
- .cursorrules (+268, -0)
- .gitignore (+42, -0)
- CHANGELOG.md (+70, -0)
- README.md (+76, -0)
- Spacefile (+14, -0)
- app.py (+0, -18)
- app/__init__.py (+3, -0)
- app/main.py (+126, -0)
- app/models/__init__.py (+3, -0)
- app/models/user.py (+32, -0)
- app/services/groq_search.py (+171, -0)
- frontend/app.py (+219, -0)
- requirements.txt (+11, -0)
- run.py (+4, -0)
.cursorrules · ADDED · @@ -0,0 +1,268 @@
# FastAPI + Streamlit + Groq Project Setup Rules

## Project Structure
```
.
├── app/
│   ├── __init__.py
│   ├── main.py              # FastAPI application
│   ├── models/
│   │   ├── __init__.py
│   │   └── base.py          # Pydantic models
│   ├── services/
│   │   ├── __init__.py
│   │   └── llm.py           # LLM integration
│   └── utils/
│       ├── __init__.py
│       └── helpers.py       # Utility functions
├── frontend/
│   ├── __init__.py
│   └── app.py               # Streamlit application
├── data/                    # Data storage
├── tests/
│   ├── __init__.py
│   ├── test_api.py
│   └── test_services.py
├── .env                     # Environment variables
├── .gitignore
├── README.md
├── requirements.txt
└── run.py                   # Application entry point
```

## Required Dependencies
```python
# requirements.txt
fastapi>=0.100.0
uvicorn>=0.22.0
pydantic>=2.0.0
streamlit>=1.25.0
requests>=2.31.0
python-multipart>=0.0.6
python-dotenv>=1.0.0
groq>=0.4.0
pytest>=7.4.0
httpx>=0.24.0  # For testing
```

## Environment Variables
```bash
# .env
GROQ_API_KEY=your-api-key
ENVIRONMENT=development
CORS_ORIGINS=["http://localhost:8501"]
```

## Best Practices

### FastAPI Setup
```python
# app/main.py
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from dotenv import load_dotenv
import json
import os

# Load environment variables
load_dotenv()

app = FastAPI(
    title="Your App Name",
    description="Your app description",
    version="1.0.0"
)

# CORS setup
app.add_middleware(
    CORSMiddleware,
    allow_origins=json.loads(os.getenv("CORS_ORIGINS", '["http://localhost:8501"]')),
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Error handling
@app.exception_handler(HTTPException)
async def http_exception_handler(request, exc):
    return JSONResponse(
        status_code=exc.status_code,
        content={"detail": exc.detail},
    )
```

### Pydantic Models
```python
# app/models/base.py
from pydantic import BaseModel, Field
from typing import Optional, List
from datetime import datetime
from uuid import UUID, uuid4

class BaseModelWithTimestamp(BaseModel):
    id: UUID = Field(default_factory=uuid4)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)

    def model_dump(self, *args, **kwargs):
        data = super().model_dump(*args, **kwargs)
        # Convert UUID and datetime to string
        data['id'] = str(data['id'])
        data['created_at'] = data['created_at'].isoformat()
        data['updated_at'] = data['updated_at'].isoformat()
        return data
```

### Streamlit Setup
```python
# frontend/app.py
import streamlit as st
import requests
from typing import Dict, List
import os
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Page config
st.set_page_config(
    page_title="Your App Name",
    page_icon="🚀",
    layout="wide",
    initial_sidebar_state="expanded"
)

# API client setup
class APIClient:
    def __init__(self):
        self.base_url = os.getenv("API_URL", "http://localhost:8000")

    def _handle_response(self, response):
        if response.ok:
            return response.json()
        st.error(f"Error: {response.status_code} - {response.text}")
        return None

    def get(self, endpoint: str):
        try:
            response = requests.get(f"{self.base_url}{endpoint}")
            return self._handle_response(response)
        except Exception as e:
            st.error(f"API Error: {str(e)}")
            return None

api = APIClient()
```

### LLM Integration
```python
# app/services/llm.py
from groq import Groq
from dotenv import load_dotenv
import os
import json
from typing import List, Dict, Any

load_dotenv()

class LLMService:
    def __init__(self):
        api_key = os.getenv("GROQ_API_KEY")
        if not api_key:
            raise ValueError("GROQ_API_KEY not set")
        self.client = Groq(api_key=api_key)

    def _handle_response(self, response_text: str) -> Dict[str, Any]:
        try:
            return json.loads(response_text)
        except json.JSONDecodeError as e:
            print(f"Error parsing LLM response: {e}")
            return None
```

### Data Storage
```python
# app/utils/storage.py
import json
from pathlib import Path
from typing import Dict, Any
from fastapi import HTTPException

class JSONStorage:
    def __init__(self, file_path: str):
        self.file_path = Path(file_path)
        self.file_path.parent.mkdir(exist_ok=True)

    def read(self) -> Dict[str, Any]:
        try:
            if not self.file_path.exists():
                return {}
            with open(self.file_path, 'r') as f:
                return json.load(f)
        except json.JSONDecodeError:
            return {}

    def write(self, data: Dict[str, Any]):
        temp_file = self.file_path.with_suffix('.tmp')
        try:
            with open(temp_file, 'w') as f:
                json.dump(data, f, indent=2)
            temp_file.replace(self.file_path)
        except Exception as e:
            if temp_file.exists():
                temp_file.unlink()
            raise HTTPException(status_code=500, detail=str(e))
```

## Common Issues & Solutions

1. **Environment Variables**
   - Always use python-dotenv
   - Check variables at startup
   - Provide clear error messages

2. **JSON Handling**
   - Always use try-except for JSON operations
   - Implement atomic writes
   - Validate data before saving

3. **API Errors**
   - Implement proper error handling
   - Use appropriate HTTP status codes
   - Return meaningful error messages

4. **LLM Integration**
   - Handle malformed responses
   - Implement fallback mechanisms
   - Cache expensive operations

5. **Frontend**
   - Show loading states
   - Handle API errors gracefully
   - Validate input before submission

6. **Testing**
   - Write tests for API endpoints
   - Mock external services
   - Test error conditions

## Security Considerations

1. **Environment Variables**
   - Never commit .env files
   - Use secure secrets management in production

2. **API Security**
   - Implement rate limiting
   - Add authentication when needed
   - Validate all inputs

3. **CORS**
   - Restrict origins in production
   - Only allow necessary methods
   - Handle credentials properly

4. **Data Storage**
   - Implement backup mechanisms
   - Use atomic operations
   - Validate data integrity
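The "Testing" guidance above is prose only in this commit. As a hedged illustration, a minimal `tests/test_api.py` could look like the sketch below; it assumes the layout described in this file, uses FastAPI's `TestClient` (which may require `httpx` to be installed), and mocks out the Groq-backed service so no external call is made. The test names are illustrative and not part of the commit.

```python
# tests/test_api.py — a minimal sketch of the testing guidance; not part of this commit.
from unittest.mock import patch

from fastapi.testclient import TestClient

from app.main import app

client = TestClient(app)


def test_list_profiles_returns_list():
    # /api/profiles should always answer with a JSON list, even when storage is empty
    response = client.get("/api/profiles")
    assert response.status_code == 200
    assert isinstance(response.json(), list)


def test_search_falls_back_without_groq():
    # Replace the module-level Groq service with None so the keyword fallback runs
    with patch("app.main.groq_search", None):
        response = client.post("/api/search", json={"query": "python"})
        assert response.status_code == 200
        assert isinstance(response.json(), list)
```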
.gitignore · ADDED · @@ -0,0 +1,42 @@
```
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Virtual Environment
venv/
ENV/

# IDE
.idea/
.vscode/
*.swp
*.swo

# Environment Variables
.env
.env.*

# Project specific
data/
*.log
.coverage
htmlcov/
```
CHANGELOG.md · ADDED · @@ -0,0 +1,70 @@
# Changelog

## [1.0.0] - Initial Release

### Added
- Basic FastAPI backend setup with CRUD operations
- Streamlit frontend with profile creation and viewing
- JSON-based data storage with atomic writes
- Basic search functionality

### Bug Fixes
- Fixed UUID serialization in JSON storage
- Added proper error handling for file operations
- Fixed CORS middleware configuration

## [1.1.0] - Groq Integration

### Added
- Integrated Groq LLM for semantic search
- Added natural language query processing
- Enhanced search results with match scores and explanations

### Bug Fixes
- Fixed environment variable loading for the Groq API key
- Added proper JSON response parsing with a fallback
- Improved error handling in search functionality

### Technical Improvements
- Added atomic file operations for data storage
- Implemented a proper package structure with `__init__.py` files
- Added type hints and documentation
- Enhanced error messages and user feedback

## Best Practices & Lessons Learned

### Environment Setup
- Always use python-dotenv for environment variable management
- Keep the `.env` file in the root directory
- Add `.env` to `.gitignore`
- Document required environment variables in the README

### Data Handling
- Use atomic operations for file writes
- Always validate JSON before writing
- Implement proper error handling for file operations
- Use Pydantic models for data validation

### API Design
- Implement proper response models
- Add comprehensive error handling
- Use proper HTTP status codes
- Document API endpoints

### Frontend
- Implement proper form validation
- Add clear error messages
- Show loading states
- Handle API errors gracefully

### Search Functionality
- Implement fallback search mechanisms
- Handle malformed LLM responses
- Provide clear search examples
- Show detailed match explanations

### Known Issues & Limitations
- LLM responses can be inconsistent
- JSON parsing can fail on malformed LLM output
- Authentication is not implemented
- Rate limiting is not implemented
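The "Environment Setup" lessons above can be made concrete with a small startup check. The sketch below is illustrative only: the `check_environment` helper and the `REQUIRED_VARS` list are assumptions, not code from this commit.

```python
# Sketch of a fail-fast environment check, assuming python-dotenv as recommended above.
import os

from dotenv import load_dotenv

REQUIRED_VARS = ["GROQ_API_KEY"]  # extend as deployment needs grow


def check_environment() -> None:
    """Raise a clear error at startup if a required variable is missing."""
    load_dotenv()
    missing = [name for name in REQUIRED_VARS if not os.getenv(name)]
    if missing:
        raise RuntimeError(f"Missing environment variables: {', '.join(missing)}")


if __name__ == "__main__":
    check_environment()
    print("Environment OK")
```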
README.md · CHANGED · @@ -10,3 +10,79 @@ pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

# 100xEngineers Discovery Platform 🚀

A platform for discovering and connecting with engineers based on their technical skills, AI expertise, and collaboration interests. Built with FastAPI, Streamlit, and powered by Groq LLM for intelligent profile matching.

## Features

- 👤 Create and manage detailed engineer profiles
- 🔍 Natural language search powered by Groq LLM
- 🤝 Find collaborators based on skills and interests
- 📊 View all registered profiles
- 🎯 Get detailed match explanations

## Tech Stack

- **Backend**: FastAPI
- **Frontend**: Streamlit
- **LLM Integration**: Groq
- **Data Storage**: JSON with atomic operations
- **Deployment**: Hugging Face Spaces

## Environment Variables

The following environment variables need to be set in your Hugging Face Space:

- `GROQ_API_KEY`: Your Groq API key
- `HF_SPACE_URL`: Your Hugging Face Space URL (set automatically)
- `ENVIRONMENT`: Set to "production" for deployment
- `CORS_ORIGINS`: List of allowed origins (configured automatically)

## Local Development

1. Clone the repository
2. Create a virtual environment:
```bash
python -m venv venv
source venv/bin/activate  # On Windows: venv\Scripts\activate
```
3. Install dependencies:
```bash
pip install -r requirements.txt
```
4. Create a `.env` file with the required variables
5. Run the application:
```bash
# Terminal 1: Backend
python run.py

# Terminal 2: Frontend
streamlit run frontend/app.py
```

## Deployment

This application is deployed on Hugging Face Spaces. The deployment is configured in the `Spacefile`, which sets up both the FastAPI backend and the Streamlit frontend services.

## Usage

1. **Create Profile**: Add your engineering profile with skills, expertise, and interests
2. **Search Profiles**: Use natural language to find matching engineers
3. **View Matches**: See detailed explanations of why profiles match your search
4. **Browse All**: View all registered engineer profiles

## Contributing

1. Fork the repository
2. Create a feature branch
3. Commit your changes
4. Push to the branch
5. Create a Pull Request

## License

MIT License - feel free to use this project as a template for your own applications!
Spacefile · ADDED · @@ -0,0 +1,14 @@
```yaml
# Spacefile Docs: https://huggingface.co/docs/hub/spaces-config-reference
configuration:
  hardware:
    cpu: 2
    memory: 16
services:
  - name: fastapi
    dist: python
    port: 8000
    command: uvicorn app.main:app --host 0.0.0.0 --port 8000
  - name: streamlit
    dist: python
    port: 8501
    command: streamlit run frontend/app.py
```
app.py · CHANGED · @@ -1,18 +0,0 @@
The previous top-level Streamlit stub is removed by this commit (all 18 lines shown below); the Streamlit UI now lives in frontend/app.py.

```python
# frontend/app.py
import streamlit as st

st.set_page_config(
    page_title="100xEngineers Discovery Platform",
    layout="centered"
)

st.title("100xEngineers Discovery Platform")

st.markdown("""
Welcome to the 100xEngineers Discovery Platform!
Use the pages on the left to navigate through the app:
- **🔑 Login**
- **✏️ Edit Profile**
- **🔍 Search**
- **👤 Profile View**
""")
```
app/__init__.py · ADDED · @@ -0,0 +1,3 @@
```python
"""
100xEngineers Discovery Platform - Backend Package
"""
```
app/main.py · ADDED · @@ -0,0 +1,126 @@
```python
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from typing import List, Dict, Tuple
from pydantic import BaseModel
import json
import os
from pathlib import Path
from app.models.user import UserProfile
from app.services.groq_search import GroqSearchService
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

app = FastAPI(title="100xEngineers Discovery Platform")

# Initialize Groq service with error handling
try:
    groq_search = GroqSearchService()
except Exception as e:
    print(f"Warning: Failed to initialize Groq service: {str(e)}")
    groq_search = None

# CORS middleware setup
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, replace with specific origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Initialize data storage
DATA_FILE = Path("data/profiles.json")
DATA_FILE.parent.mkdir(exist_ok=True)

# Initialize the JSON file if it doesn't exist
if not DATA_FILE.exists():
    with open(DATA_FILE, "w") as f:
        json.dump({}, f)

def load_profiles() -> Dict[str, UserProfile]:
    try:
        with open(DATA_FILE, "r") as f:
            try:
                data = json.load(f)
                return {k: UserProfile(**v) for k, v in data.items()}
            except json.JSONDecodeError:
                # If file is corrupted, start fresh
                return {}
    except FileNotFoundError:
        # Create file if it doesn't exist
        with open(DATA_FILE, "w") as f:
            json.dump({}, f)
        return {}

def save_profiles(profiles: Dict[str, UserProfile]):
    # Create directory if it doesn't exist
    DATA_FILE.parent.mkdir(exist_ok=True)

    # Write to a temporary file first
    temp_file = DATA_FILE.with_suffix('.tmp')
    try:
        with open(temp_file, "w") as f:
            # Use model_dump() which now handles UUID conversion
            json.dump({k: v.model_dump() for k, v in profiles.items()}, f, indent=2)

        # Rename temp file to actual file (atomic operation)
        temp_file.replace(DATA_FILE)
    except Exception as e:
        if temp_file.exists():
            temp_file.unlink()  # Delete temp file if it exists
        raise HTTPException(status_code=500, detail=str(e))

# API endpoints
@app.post("/api/profiles", response_model=UserProfile)
async def create_profile(profile: UserProfile):
    profiles = load_profiles()
    profile_id = str(profile.id)
    profiles[profile_id] = profile
    save_profiles(profiles)
    return profile

@app.get("/api/profiles", response_model=List[UserProfile])
async def list_profiles():
    profiles = load_profiles()
    return list(profiles.values())

@app.get("/api/profiles/{profile_id}", response_model=UserProfile)
async def get_profile(profile_id: str):
    profiles = load_profiles()
    if profile_id not in profiles:
        raise HTTPException(status_code=404, detail="Profile not found")
    return profiles[profile_id]

# Update SearchResponse model
class SearchResponse(BaseModel):
    profile: UserProfile
    explanation: str

class SearchQuery(BaseModel):
    query: str

@app.post("/api/search", response_model=List[SearchResponse])
async def search_profiles(search: SearchQuery):
    profiles = load_profiles()

    if not groq_search:
        # Fallback to basic search if Groq is not available
        results = []
        query = search.query.lower()
        for profile in profiles.values():
            if (query in profile.name.lower() or
                any(query in skill.lower() for skill in profile.technical_skills) or
                any(query in expertise.lower() for expertise in profile.ai_expertise) or
                query in profile.mentoring_preferences.lower()):
                results.append((profile, "Basic match based on keyword search"))
        return [SearchResponse(profile=profile, explanation=explanation)
                for profile, explanation in results]

    # Use Groq for semantic search
    matches = groq_search.search_profiles(search.query, list(profiles.values()))

    # Convert to response format
    return [SearchResponse(profile=profile, explanation=explanation)
            for profile, explanation in matches]
```
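For reference, the endpoints above can be exercised from a short Python script once the backend is running locally. This is a hedged sketch only: it assumes `python run.py` is serving on port 8000, and the payload values are invented.

```python
# Manual exercise of the API above; assumes the backend is running on localhost:8000.
import requests

BASE_URL = "http://localhost:8000"

profile = {
    "name": "Ada Example",
    "technical_skills": ["Python", "FastAPI"],
    "projects": ["Profile search demo"],
    "ai_expertise": ["NLP"],
    "mentoring_preferences": "Happy to pair on backend topics once a week",
    "collaboration_interests": ["Open Source"],
}

# Create a profile, then search for it
created = requests.post(f"{BASE_URL}/api/profiles", json=profile).json()
print("created id:", created["id"])

matches = requests.post(f"{BASE_URL}/api/search", json={"query": "FastAPI mentor"}).json()
for match in matches:
    print(match["profile"]["name"], "->", match["explanation"][:60])
```

A 422 response from `/api/profiles` means the `UserProfile` validation rejected the payload, for example a `mentoring_preferences` value shorter than 10 characters.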
app/models/__init__.py · ADDED · @@ -0,0 +1,3 @@
```python
"""
100xEngineers Discovery Platform - Models Package
"""
```
app/models/user.py · ADDED · @@ -0,0 +1,32 @@
```python
from pydantic import BaseModel, Field, HttpUrl, AnyHttpUrl
from typing import List, Optional
from uuid import UUID, uuid4

class UserProfile(BaseModel):
    id: UUID = Field(default_factory=uuid4)
    name: str = Field(..., min_length=2, max_length=100)
    technical_skills: List[str] = Field(default_factory=list)
    projects: List[str] = Field(default_factory=list)
    ai_expertise: List[str] = Field(default_factory=list)
    mentoring_preferences: str = Field(..., min_length=10, max_length=500)
    collaboration_interests: List[str] = Field(default_factory=list)
    portfolio_url: Optional[str] = None

    class Config:
        json_schema_extra = {
            "example": {
                "name": "John Doe",
                "technical_skills": ["Python", "FastAPI", "Machine Learning"],
                "projects": ["AI Chatbot", "Web Scraping Tool"],
                "ai_expertise": ["NLP", "Computer Vision"],
                "mentoring_preferences": "Available for weekly 1-hour sessions, focusing on AI and backend development",
                "collaboration_interests": ["Open Source", "AI Projects"],
                "portfolio_url": "https://github.com/johndoe"
            }
        }

    def model_dump(self, *args, **kwargs):
        data = super().model_dump(*args, **kwargs)
        # Convert UUID to string
        data['id'] = str(data['id'])
        return data
```
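For illustration only, the effect of the overridden `model_dump()` can be seen with a small script; the example values are made up and not part of the commit.

```python
# Shows how the overridden model_dump() keeps the UUID JSON-safe.
import json

from app.models.user import UserProfile

profile = UserProfile(
    name="Ada Example",
    mentoring_preferences="Weekly pairing on FastAPI and testing",
)

data = profile.model_dump()
print(type(data["id"]))       # <class 'str'> — UUID already stringified
print(json.dumps(data)[:80])  # serializes without a custom encoder
```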
app/services/groq_search.py · ADDED · @@ -0,0 +1,171 @@
```python
import os
from typing import List, Dict, Tuple
from groq import Groq
from app.models.user import UserProfile
from dotenv import load_dotenv
import json

# Load environment variables from .env file
load_dotenv()

class GroqSearchService:
    def __init__(self):
        api_key = os.getenv("GROQ_API_KEY")
        if not api_key:
            raise ValueError("GROQ_API_KEY environment variable is not set")

        self.client = Groq(
            api_key=api_key,
        )

    def _create_profile_context(self, profile: UserProfile) -> str:
        """Create a searchable context string from a profile."""
        return f"""
        Name: {profile.name}
        Technical Skills: {', '.join(profile.technical_skills)}
        Projects: {', '.join(profile.projects)}
        AI Expertise: {', '.join(profile.ai_expertise)}
        Mentoring Preferences: {profile.mentoring_preferences}
        Collaboration Interests: {', '.join(profile.collaboration_interests)}
        """

    def search_profiles(self, query: str, profiles: List[UserProfile]) -> List[Tuple[UserProfile, str]]:
        """
        Search profiles using Groq LLM and return matches with explanations.
        Returns: List of tuples (profile, explanation)
        """
        if not profiles:
            return []

        # Create context from all profiles
        profile_contexts = {str(p.id): self._create_profile_context(p) for p in profiles}

        # Create the prompt for Groq
        prompt = f"""You are an expert at matching engineers based on their profiles. Your task is to find the most relevant profiles that match the given search query.

Search Query: "{query}"

Available Engineer Profiles:
{'-' * 80}
"""
        for pid, context in profile_contexts.items():
            prompt += f"\nProfile ID: {pid}\n{context}\n{'-' * 80}"

        prompt += """\nInstructions:
1. Analyze the search query and understand the key requirements.
2. Compare these requirements against each profile's skills, expertise, and preferences.
3. For each matching profile, calculate a match score (0-100) based on:
   - Direct skill matches
   - Related expertise
   - Project experience
   - Mentoring alignment
   - Collaboration potential

Return your analysis in the following JSON format:
[
    {
        "profile_id": "exact-profile-uuid-from-above",
        "match_score": number-between-0-and-100,
        "explanation": "Detailed explanation of why this profile matches the search query"
    }
]

Important:
- Include ANY profile that has relevant matches, even if the match score is moderate
- Be lenient with matching - if someone has related skills, they might be a good fit
- The explanation should be specific about why the profile matches
- Sort results by match_score in descending order
- Return an empty list [] if truly no profiles match

Remember: It's better to show more potential matches than to be too restrictive."""

        # Get response from Groq
        try:
            chat_completion = self.client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": "You are an expert at matching engineers based on their profiles. You always return valid JSON in the exact format requested."
                    },
                    {
                        "role": "user",
                        "content": prompt,
                    }
                ],
                model="llama3-8b-8192",
                temperature=0.2,  # Slightly higher temperature for more inclusive matching
                max_tokens=2000,
            )

            response_text = chat_completion.choices[0].message.content.strip()

            # Try to extract JSON if it's wrapped in backticks or has extra text
            try:
                # First try direct JSON parsing
                matches = json.loads(response_text)
            except json.JSONDecodeError:
                # Try to extract JSON from the response
                import re
                json_match = re.search(r'\[[\s\S]*\]', response_text)
                if json_match:
                    try:
                        matches = json.loads(json_match.group(0))
                    except json.JSONDecodeError:
                        print(f"Failed to parse Groq response: {response_text}")
                        return self._fallback_search(query, profiles)
                else:
                    print(f"No JSON found in response: {response_text}")
                    return self._fallback_search(query, profiles)

            # Convert to list of tuples (profile, explanation)
            results = []
            for match in matches:
                profile_id = match.get("profile_id")
                explanation = match.get("explanation", "")
                score = match.get("match_score", 0)

                # Find the profile with this ID
                profile = next((p for p in profiles if str(p.id) == profile_id), None)
                if profile:
                    results.append((profile, f"Match Score: {score}%\n{explanation}"))

            # If no matches found through Groq, try fallback search
            if not results:
                return self._fallback_search(query, profiles)

            return results

        except Exception as e:
            print(f"Error during Groq search: {str(e)}")
            return self._fallback_search(query, profiles)

    def _fallback_search(self, query: str, profiles: List[UserProfile]) -> List[Tuple[UserProfile, str]]:
        """Fallback to basic keyword matching if Groq search fails."""
        results = []
        query_terms = query.lower().split()

        for profile in profiles:
            score = 0
            matches = []

            # Check each field for matches
            profile_text = self._create_profile_context(profile).lower()

            for term in query_terms:
                if term in profile_text:
                    score += 1
                    # Find which field matched
                    if term in profile.name.lower():
                        matches.append(f"Name matches '{term}'")
                    if any(term in skill.lower() for skill in profile.technical_skills):
                        matches.append(f"Has technical skill related to '{term}'")
                    if any(term in exp.lower() for exp in profile.ai_expertise):
                        matches.append(f"Has AI expertise related to '{term}'")
                    if term in profile.mentoring_preferences.lower():
                        matches.append(f"Mentoring preferences match '{term}'")

            if score > 0:
                explanation = "Basic Match:\n" + "\n".join(matches)
                results.append((profile, explanation))

        return sorted(results, key=lambda x: len(x[1].split('\n')), reverse=True)
```
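A hedged usage sketch for this service follows; it assumes `GROQ_API_KEY` is set and builds a profile in memory, and the example values are invented.

```python
# Illustrative driver for GroqSearchService; not part of this commit.
from app.models.user import UserProfile
from app.services.groq_search import GroqSearchService

profiles = [
    UserProfile(
        name="Ada Example",
        technical_skills=["Python", "FastAPI"],
        ai_expertise=["NLP"],
        mentoring_preferences="Weekly pairing on backend topics",
    )
]

service = GroqSearchService()
for profile, explanation in service.search_profiles("backend mentor", profiles):
    print(profile.name)
    print(explanation)
```

Note that the service never raises on a bad LLM response: any parsing or API failure degrades to `_fallback_search`, so callers always receive a (possibly empty) list of `(profile, explanation)` tuples.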
frontend/app.py · ADDED · @@ -0,0 +1,219 @@
```python
import streamlit as st
import requests
from typing import Dict, List
import os
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Page config
st.set_page_config(
    page_title="100xEngineers Discovery Platform",
    page_icon="🚀",
    layout="wide",
    initial_sidebar_state="expanded"
)

# API client setup
class APIClient:
    def __init__(self):
        # Get the Hugging Face Space URL from environment or use localhost
        space_url = os.getenv("HF_SPACE_URL")
        if space_url:
            # In Hugging Face Spaces, the FastAPI service will be available at port 8000
            self.base_url = f"https://{space_url}-8000.hf.space"
        else:
            # Local development
            self.base_url = "http://localhost:8000"

        st.sidebar.text(f"API URL: {self.base_url}")

    def _handle_response(self, response):
        if response.ok:
            return response.json()
        st.error(f"Error: {response.status_code} - {response.text}")
        return None

    def get(self, endpoint: str):
        try:
            response = requests.get(f"{self.base_url}{endpoint}")
            return self._handle_response(response)
        except Exception as e:
            st.error(f"API Error: {str(e)}")
            return None

api = APIClient()

def create_profile(profile_data: dict):
    # Validate and clean the data before sending
    if profile_data.get("portfolio_url"):
        url = profile_data["portfolio_url"].strip()
        if not (url.startswith("http://") or url.startswith("https://")):
            url = f"https://{url}"
        try:
            # Basic URL validation
            if not url.replace("https://", "").replace("http://", ""):
                profile_data.pop("portfolio_url", None)
            else:
                profile_data["portfolio_url"] = url
        except Exception:
            profile_data.pop("portfolio_url", None)
    else:
        profile_data.pop("portfolio_url", None)

    try:
        response = requests.post(f"{api.base_url}/api/profiles", json=profile_data)
        if response.status_code == 422:
            error_detail = response.json().get('detail', [])
            if isinstance(error_detail, list):
                for error in error_detail:
                    st.error(f"Validation Error: {error.get('msg')}")
            else:
                st.error(f"Validation Error: {error_detail}")
            return None
        elif not response.ok:
            st.error(f"Server Error: {response.status_code}")
            return None
        return response.json()
    except requests.exceptions.ConnectionError:
        st.error("Could not connect to the server. Please make sure the backend is running.")
        return None
    except Exception as e:
        st.error(f"An unexpected error occurred: {str(e)}")
        return None

def search_profiles(query: str):
    try:
        response = requests.post(
            f"{api.base_url}/api/search",
            json={"query": query}  # Send query in correct format
        )
        if not response.ok:
            if response.status_code == 422:
                st.error("Invalid search query format")
            else:
                st.error(f"Search failed with status code: {response.status_code}")
            return []
        return response.json()
    except requests.exceptions.ConnectionError:
        st.error("Could not connect to the server. Please make sure the backend is running.")
        return []
    except Exception as e:
        st.error(f"An unexpected error occurred during search: {str(e)}")
        return []

def list_profiles():
    try:
        response = requests.get(f"{api.base_url}/api/profiles")
        if not response.ok:
            st.error(f"Failed to fetch profiles: {response.status_code}")
            return []
        return response.json()
    except requests.exceptions.ConnectionError:
        st.error("Could not connect to the server. Please make sure the backend is running.")
        return []
    except Exception as e:
        st.error(f"An unexpected error occurred while fetching profiles: {str(e)}")
        return []

# UI Components
st.title("100xEngineers Discovery Platform 🚀")

# Sidebar navigation
page = st.sidebar.radio("Navigation", ["Search Profiles", "Create Profile", "View All Profiles"])

if page == "Create Profile":
    st.header("Create Your Profile")

    with st.form("profile_form"):
        name = st.text_input("Name", help="Enter your full name (minimum 2 characters)")
        technical_skills = st.text_input("Technical Skills (comma-separated)",
                                         help="Enter your technical skills, separated by commas")
        projects = st.text_input("Projects (comma-separated)",
                                 help="List your notable projects, separated by commas")
        ai_expertise = st.text_input("AI Expertise (comma-separated)",
                                     help="List your AI-related skills and expertise")
        mentoring_preferences = st.text_area("Mentoring Preferences",
                                             help="Describe your mentoring preferences (minimum 10 characters)")
        collaboration_interests = st.text_input("Collaboration Interests (comma-separated)",
                                                help="List your interests for collaboration")
        portfolio_url = st.text_input("Portfolio URL",
                                      help="Enter your portfolio URL (optional)")

        submitted = st.form_submit_button("Create Profile")

        if submitted:
            if len(name.strip()) < 2:
                st.error("Name must be at least 2 characters long")
            elif len(mentoring_preferences.strip()) < 10:
                st.error("Mentoring preferences must be at least 10 characters long")
            else:
                profile_data = {
                    "name": name.strip(),
                    "technical_skills": [s.strip() for s in technical_skills.split(",") if s.strip()],
                    "projects": [p.strip() for p in projects.split(",") if p.strip()],
                    "ai_expertise": [a.strip() for a in ai_expertise.split(",") if a.strip()],
                    "mentoring_preferences": mentoring_preferences.strip(),
                    "collaboration_interests": [c.strip() for c in collaboration_interests.split(",") if c.strip()],
                    "portfolio_url": portfolio_url.strip() if portfolio_url.strip() else None
                }

                if profile := create_profile(profile_data):
                    st.success("Profile created successfully!")
                    st.json(profile)

elif page == "Search Profiles":
    st.header("Search Profiles")

    st.markdown("""
    Search for engineers using natural language. Examples:
    - "Find someone experienced in machine learning and NLP"
    - "Looking for a mentor in backend development"
    - "Need a collaborator for an open source AI project"
    """)

    query = st.text_input("Enter your search query in natural language")

    if query:
        results = search_profiles(query)

        if results:
            st.subheader(f"Found {len(results)} matches")
            for result in results:
                profile = result['profile']
                explanation = result['explanation']

                with st.expander(f"{profile['name']}"):
                    # Display match explanation
                    st.markdown(f"**Match Analysis:**\n{explanation}")
                    st.markdown("---")

                    # Display profile details
                    st.write("**Technical Skills:**", ", ".join(profile["technical_skills"]))
                    st.write("**AI Expertise:**", ", ".join(profile["ai_expertise"]))
                    st.write("**Projects:**", ", ".join(profile["projects"]))
                    st.write("**Mentoring Preferences:**", profile["mentoring_preferences"])
                    st.write("**Collaboration Interests:**", ", ".join(profile["collaboration_interests"]))
                    if profile.get("portfolio_url"):
                        st.write("**Portfolio:**", profile["portfolio_url"])
        else:
            st.info("No matching profiles found. Try adjusting your search query.")

else:  # View All Profiles
    st.header("All Profiles")

    profiles = list_profiles()

    if profiles:
        for profile in profiles:
            with st.expander(f"{profile['name']}"):
                st.write("**Technical Skills:**", ", ".join(profile["technical_skills"]))
                st.write("**AI Expertise:**", ", ".join(profile["ai_expertise"]))
                st.write("**Projects:**", ", ".join(profile["projects"]))
                st.write("**Mentoring Preferences:**", profile["mentoring_preferences"])
                st.write("**Collaboration Interests:**", ", ".join(profile["collaboration_interests"]))
                if profile.get("portfolio_url"):
                    st.write("**Portfolio:**", profile["portfolio_url"])
    else:
        st.info("No profiles found. Create one to get started!")
```
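The project's own guidelines (.cursorrules, CHANGELOG) call for showing loading states, which this page does not do yet. One possible, non-committed tweak is to wrap the existing search call in `st.spinner`:

```python
# Sketch only — not part of this commit. Wraps the existing search_profiles()
# helper so the user sees a loading indicator while the backend responds.
if query:
    with st.spinner("Searching profiles..."):
        results = search_profiles(query)
```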
requirements.txt · ADDED · @@ -0,0 +1,11 @@
```
fastapi==0.104.1
uvicorn==0.24.0
pydantic==2.5.2
streamlit==1.29.0
requests==2.31.0
python-multipart==0.0.6
python-jose[cryptography]==3.3.0
passlib[bcrypt]==1.7.4
python-dotenv==1.0.0
groq==0.4.2
```
run.py · ADDED · @@ -0,0 +1,4 @@
```python
import uvicorn

if __name__ == "__main__":
    uvicorn.run("app.main:app", host="127.0.0.1", port=8000, reload=True)
```