LPX committed
Commit · c1d03da
Parent(s): 91479f7

refactor: reorganize agent structure by moving models to agents directory, update logging level, and enhance .gitignore for model files
Browse files
- .gitignore +1 -0
- {models → agents}/monitoring_agents.py +0 -0
- {models → agents}/smart_agents.py +0 -0
- {models → agents}/weight_management.py +0 -0
- app_mcp.py +5 -5
.gitignore CHANGED
@@ -7,3 +7,4 @@ forensics/__pycache__/*
 *.cpython-311.pyc
 *.cpython-312.pyc
 test.ipynb
+models/*
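The new models/* rule keeps downloaded model weights out of version control; app_mcp.py points the Hugging Face cache at that same directory via HF_HUB_CACHE (see the diff below). A minimal sketch of that flow, assuming huggingface_hub is installed and using a placeholder repo id:

    import os

    # Point the Hugging Face Hub cache at ./models before any downloads happen,
    # mirroring the assignment in app_mcp.py. Must be set before importing
    # huggingface_hub, which reads the variable at import time.
    os.environ['HF_HUB_CACHE'] = './models'

    from huggingface_hub import snapshot_download

    # Placeholder repo id for illustration; the snapshot lands under ./models,
    # which the new .gitignore rule (models/*) excludes from commits.
    snapshot_download(repo_id="org/some-model")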
{models → agents}/monitoring_agents.py RENAMED
File without changes

{models → agents}/smart_agents.py RENAMED
File without changes

{models → agents}/weight_management.py RENAMED
File without changes
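Because the three modules only moved from models/ to agents/ with no content changes, the only code that has to follow is the import side, as the app_mcp.py diff below shows. A sketch of the resulting layout and the updated imports, assuming agents/ is importable as a plain package from the Space root:

    # Project layout after the move (illustrative):
    #   agents/
    #     monitoring_agents.py
    #     smart_agents.py
    #     weight_management.py
    #   models/   <- now holds only HF_HUB_CACHE downloads, ignored by git

    from agents.monitoring_agents import EnsembleMonitorAgent, WeightOptimizationAgent, SystemHealthAgent
    from agents.smart_agents import ContextualIntelligenceAgent, ForensicAnomalyDetectionAgent
    from agents.weight_management import ModelWeightManager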
app_mcp.py CHANGED
@@ -21,21 +21,21 @@ from utils.wavelet import wavelet_blocking_noise_estimation
 from utils.bitplane import bit_plane_extractor
 from utils.hf_logger import log_inference_data
 from utils.text_content import QUICK_INTRO, IMPLEMENTATION
-from …
-from …
+from agents.monitoring_agents import EnsembleMonitorAgent, WeightOptimizationAgent, SystemHealthAgent
+from agents.smart_agents import ContextualIntelligenceAgent, ForensicAnomalyDetectionAgent
 
 from forensics.registry import register_model, MODEL_REGISTRY, ModelEntry
-from …
+from agents.weight_management import ModelWeightManager
 from dotenv import load_dotenv
 
 # Configure logging
-logging.basicConfig(level=logging.…
+logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 os.environ['HF_HUB_CACHE'] = './models'
 
 
 load_dotenv()
-print(os.getenv("HF_HUB_CACHE"))
+# print(os.getenv("HF_HUB_CACHE"))
 
 # Ensure using GPU if available
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
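The "update logging level" part of the commit message corresponds to the logging.basicConfig(level=logging.INFO) line above; the previous level is cut off in this view. A minimal sketch of what the INFO threshold means for the module-level logger, using only the standard library:

    import logging

    # Same pattern as app_mcp.py: configure the root handler once, then use a
    # module-level logger. With level=INFO, debug messages are suppressed.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    logger.debug("hidden at INFO level")          # not emitted
    logger.info("models cached under ./models")   # emitted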