import pandas as pd
import pytest
import torch
from cognitive_mapping_probe.orchestrator_seismograph import run_seismic_analysis
from cognitive_mapping_probe.auto_experiment import run_auto_suite, get_curated_experiments
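
# NOTE (added comment): these tests assume pytest-mock's `mocker` fixture and a `mock_llm`
# fixture (presumably defined in the project's conftest.py) that provides a lightweight
# stand-in for a loaded model, so no real model weights are touched here.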


def test_run_seismic_analysis_no_injection(mocker, mock_llm):
    """Tests the orchestrator in baseline mode, i.e. without concept injection."""
    mock_run_seismic = mocker.patch('cognitive_mapping_probe.orchestrator_seismograph.run_silent_cogitation_seismic', return_value=[1.0])
    mock_get_concept = mocker.patch('cognitive_mapping_probe.orchestrator_seismograph.get_concept_vector')
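    # With an empty `concept_to_inject`, the orchestrator should never resolve a concept
    # vector, so `get_concept_vector` is patched and later checked for zero calls.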
    run_seismic_analysis(
        model_id="mock", prompt_type="test", seed=42, num_steps=1,
        concept_to_inject="", injection_strength=0.0, progress_callback=mocker.MagicMock(),
        llm_instance=mock_llm
    )
    mock_run_seismic.assert_called_once()
    mock_get_concept.assert_not_called()


def test_run_seismic_analysis_with_injection(mocker, mock_llm):
    """Tests the orchestrator with concept injection."""
    mock_run_seismic = mocker.patch('cognitive_mapping_probe.orchestrator_seismograph.run_silent_cogitation_seismic', return_value=[1.0])
    mock_get_concept = mocker.patch(
        'cognitive_mapping_probe.orchestrator_seismograph.get_concept_vector',
        return_value=torch.randn(10)
    )
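    # A small random tensor stands in for the real concept embedding; only the call contract matters here.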
    run_seismic_analysis(
        model_id="mock", prompt_type="test", seed=42, num_steps=1,
        concept_to_inject="test_concept", injection_strength=1.5, progress_callback=mocker.MagicMock(),
        llm_instance=mock_llm
    )
    mock_run_seismic.assert_called_once()
    mock_get_concept.assert_called_once_with(mock_llm, "test_concept")


def test_get_curated_experiments_structure():
    """Tests the data structure of the curated experiments."""
    experiments = get_curated_experiments()
    assert isinstance(experiments, dict)
    assert "Sequential Intervention (Self-Analysis -> Deletion)" in experiments
    protocol = experiments["Sequential Intervention (Self-Analysis -> Deletion)"]
    assert isinstance(protocol, list) and len(protocol) == 2


def test_run_auto_suite_special_protocol(mocker, mock_llm):
    """
    Tests the special logic path for the sequential intervention protocol,
    using the current name of the curated experiment.
    """
    mock_analysis = mocker.patch('cognitive_mapping_probe.auto_experiment.run_seismic_analysis', return_value={"stats": {}, "state_deltas": []})
    mocker.patch('cognitive_mapping_probe.auto_experiment.get_or_load_model', return_value=mock_llm)
    # Use the exact experiment name so the dedicated `if` branch in `run_auto_suite` is taken.
    correct_experiment_name = "Sequential Intervention (Self-Analysis -> Deletion)"
    run_auto_suite(
        model_id="mock-4b", num_steps=10, seed=42,
        experiment_name=correct_experiment_name,
        progress_callback=mocker.MagicMock()
    )
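    # For this protocol, exactly two analysis runs are expected: the self-analysis step with an
    # injected concept first, then the deletion step without any injection.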
    # Both steps must reuse the single pre-loaded model instance passed via `llm_instance`.
    assert mock_analysis.call_count == 2
    first_call_kwargs = mock_analysis.call_args_list[0].kwargs
    second_call_kwargs = mock_analysis.call_args_list[1].kwargs
    assert 'llm_instance' in first_call_kwargs
    assert 'llm_instance' in second_call_kwargs
    assert first_call_kwargs['llm_instance'] is mock_llm
    assert second_call_kwargs['llm_instance'] is mock_llm
    assert first_call_kwargs['concept_to_inject'] != ""
    assert second_call_kwargs['concept_to_inject'] == ""
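

# A minimal additional sketch (not part of the original suite). It only assumes that
# `get_curated_experiments` returns a dict keyed by human-readable experiment names.
def test_curated_experiment_names_are_nonempty_strings():
    """Sketch: every curated experiment should be registered under a non-empty string name."""
    for name in get_curated_experiments():
        assert isinstance(name, str) and name.strip()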