from unittest.mock import patch, MagicMock, ANY

import gradio as gr
import pandas as pd
import pytest

from ankigen_core.card_generator import orchestrate_card_generation
from ankigen_core.exporters import export_csv, export_deck
from ankigen_core.learning_path import analyze_learning_path
from ankigen_core.models import Card, CardFront, CardBack
from ankigen_core.ui_logic import update_mode_visibility, use_selected_subjects


MOCK_SUBJECT_INPUT = "Initial Subject"
MOCK_DESCRIPTION_INPUT = "Initial Description"
MOCK_TEXT_INPUT = "Initial Text Input"
MOCK_URL_INPUT = "http://initial.url"

EXPECTED_UI_LOGIC_KEYS_MODE_VISIBILITY = [
    "subject_mode_group",
    "path_mode_group",
    "text_mode_group",
    "web_mode_group",
    "path_results_group",
    "cards_output_group",
    "subject_textbox",
    "description_textbox",
    "source_text_textbox",
    "url_textbox",
    "output_dataframe",
    "subjects_dataframe",
    "learning_order_markdown",
    "projects_markdown",
    "progress_html",
    "total_cards_number",
]

EXPECTED_UI_LOGIC_KEYS_USE_SUBJECTS = [
    "generation_mode_radio",
    "subject_mode_group",
    "path_mode_group",
    "text_mode_group",
    "web_mode_group",
    "path_results_group",
    "cards_output_group",
    "subject_textbox",
    "description_textbox",
    "source_text_textbox",
    "url_textbox",
    "topic_number_slider",
    "preference_prompt_textbox",
    "output_dataframe",
    "subjects_dataframe",
    "learning_order_markdown",
    "projects_markdown",
    "progress_html",
    "total_cards_number",
]
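# Both key lists above are expected to match the keys of the update dictionaries
# returned by ankigen_core.ui_logic.update_mode_visibility and use_selected_subjects
# (assumption based on the assertions below); the tests check membership of each
# expected key rather than exact equality of the key sets.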


@pytest.mark.parametrize(
    "mode, expected_visibilities, expected_values",
    [
        (
            "subject",
            {
                "subject_mode_group": True,
                "path_mode_group": False,
                "text_mode_group": False,
                "web_mode_group": False,
                "path_results_group": False,
                "cards_output_group": True,
            },
            {
                "subject_textbox": MOCK_SUBJECT_INPUT,
                "description_textbox": "",
                "source_text_textbox": "",
                "url_textbox": "",
            },
        ),
        (
            "path",
            {
                "subject_mode_group": False,
                "path_mode_group": True,
                "text_mode_group": False,
                "web_mode_group": False,
                "path_results_group": True,
                "cards_output_group": False,
            },
            {
                "subject_textbox": "",
                "description_textbox": MOCK_DESCRIPTION_INPUT,
                "source_text_textbox": "",
                "url_textbox": "",
            },
        ),
        (
            "text",
            {
                "subject_mode_group": False,
                "path_mode_group": False,
                "text_mode_group": True,
                "web_mode_group": False,
                "path_results_group": False,
                "cards_output_group": True,
            },
            {
                "subject_textbox": "",
                "description_textbox": "",
                "source_text_textbox": MOCK_TEXT_INPUT,
                "url_textbox": "",
            },
        ),
        (
            "web",
            {
                "subject_mode_group": False,
                "path_mode_group": False,
                "text_mode_group": False,
                "web_mode_group": True,
                "path_results_group": False,
                "cards_output_group": True,
            },
            {
                "subject_textbox": "",
                "description_textbox": "",
                "source_text_textbox": "",
                "url_textbox": MOCK_URL_INPUT,
            },
        ),
    ],
)
def test_generation_mode_change_updates_ui_correctly(
    mode, expected_visibilities, expected_values
):
    """
    Tests that changing the generation mode calls update_mode_visibility and that the
    returned dictionary would update app.py's UI components as expected.
    """
    result_dict = update_mode_visibility(
        mode=mode,
        current_subject=MOCK_SUBJECT_INPUT,
        current_description=MOCK_DESCRIPTION_INPUT,
        current_text=MOCK_TEXT_INPUT,
        current_url=MOCK_URL_INPUT,
    )

    for key in EXPECTED_UI_LOGIC_KEYS_MODE_VISIBILITY:
        assert key in result_dict, f"Key {key} missing in result for mode {mode}"

    for component_key, expected_visibility in expected_visibilities.items():
        assert (
            result_dict[component_key]["visible"] == expected_visibility
        ), f"Visibility for {component_key} in mode '{mode}' was not {expected_visibility}"

    for component_key, expected_value in expected_values.items():
        assert (
            result_dict[component_key]["value"] == expected_value
        ), f"Value for {component_key} in mode '{mode}' was not '{expected_value}'"

    # Outputs from the previous mode should be cleared on every mode change.
    assert result_dict["output_dataframe"]["value"] is None
    assert result_dict["subjects_dataframe"]["value"] is None
    assert result_dict["learning_order_markdown"]["value"] == ""
    assert result_dict["projects_markdown"]["value"] == ""
    assert result_dict["progress_html"]["value"] == ""
    assert result_dict["progress_html"]["visible"] is False
    assert result_dict["total_cards_number"]["value"] == 0
    assert result_dict["total_cards_number"]["visible"] is False


@patch("ankigen_core.learning_path.structured_output_completion")
@patch("ankigen_core.learning_path.OpenAIClientManager")
@patch("ankigen_core.learning_path.ResponseCache")
def test_analyze_learning_path_button_click(
    mock_response_cache_class, mock_client_manager_class, mock_soc
):
    """
    Tests that the analyze_button.click event (calling analyze_learning_path)
    processes its inputs and produces outputs suitable for updating the UI.
    """
    mock_client_manager_instance = mock_client_manager_class.return_value
    mock_openai_client = MagicMock()
    mock_client_manager_instance.get_client.return_value = mock_openai_client
    mock_client_manager_instance.initialize_client.return_value = None

    mock_cache_instance = mock_response_cache_class.return_value
    mock_cache_instance.get.return_value = None

    test_api_key = "sk-testkey123"
    test_description = "Become a data scientist"
    test_model = "gpt-4.1-test"

    mock_llm_response = {
        "subjects": [
            {
                "Subject": "Python Basics",
                "Prerequisites": "None",
                "Time Estimate": "4 weeks",
            },
            {
                "Subject": "Pandas & NumPy",
                "Prerequisites": "Python Basics",
                "Time Estimate": "3 weeks",
            },
        ],
        "learning_order": "1. Python Basics\n2. Pandas & NumPy",
        "projects": "Analyze a public dataset.",
    }
    mock_soc.return_value = mock_llm_response

    df_subjects, md_order, md_projects = analyze_learning_path(
        client_manager=mock_client_manager_instance,
        cache=mock_cache_instance,
        api_key=test_api_key,
        description=test_description,
        model=test_model,
    )

    mock_client_manager_instance.initialize_client.assert_called_once_with(test_api_key)
    mock_client_manager_instance.get_client.assert_called_once()
    mock_soc.assert_called_once_with(
        openai_client=mock_openai_client,
        model=test_model,
        response_format={"type": "json_object"},
        system_prompt=ANY,
        user_prompt=ANY,
        cache=mock_cache_instance,
    )
    assert test_description in mock_soc.call_args[1]["user_prompt"]

    assert isinstance(df_subjects, pd.DataFrame)
    assert len(df_subjects) == 2
    assert df_subjects.iloc[0]["Subject"] == "Python Basics"
    assert list(df_subjects.columns) == ["Subject", "Prerequisites", "Time Estimate"]

    assert "Python Basics" in md_order
    assert "Pandas & NumPy" in md_order
    assert "Analyze a public dataset." in md_projects

    # A missing API key should surface as a gr.Error.
    with pytest.raises(gr.Error, match="API key is required"):
        analyze_learning_path(
            client_manager=mock_client_manager_instance,
            cache=mock_cache_instance,
            api_key="",
            description=test_description,
            model=test_model,
        )

    # A response without the expected keys should also surface as a gr.Error.
    mock_soc.return_value = {"wrong_key": "data"}
    with pytest.raises(gr.Error, match="invalid API response format"):
        analyze_learning_path(
            client_manager=mock_client_manager_instance,
            cache=mock_cache_instance,
            api_key=test_api_key,
            description=test_description,
            model=test_model,
        )


def test_use_selected_subjects_button_click_success():
    """Test that use_subjects_button.click (calling use_selected_subjects) works correctly."""
    sample_data = {
        "Subject": ["Intro to Python", "Data Structures", "Algorithms"],
        "Prerequisites": ["None", "Intro to Python", "Data Structures"],
        "Time Estimate": ["2 weeks", "3 weeks", "4 weeks"],
    }
    subjects_df = pd.DataFrame(sample_data)

    result_dict = use_selected_subjects(subjects_df)

    for key in EXPECTED_UI_LOGIC_KEYS_USE_SUBJECTS:
        assert key in result_dict, f"Key {key} missing in use_selected_subjects result"

    assert result_dict["generation_mode_radio"] == "subject"
    assert (
        result_dict["subject_textbox"] == "Intro to Python, Data Structures, Algorithms"
    )
    assert result_dict["topic_number_slider"] == 4
    assert (
        "connections between these subjects" in result_dict["preference_prompt_textbox"]
    )
    assert result_dict["description_textbox"] == ""
    assert result_dict["source_text_textbox"] == ""
    assert result_dict["url_textbox"] == ""
    assert result_dict["subjects_dataframe"] is subjects_df

    assert result_dict["subject_mode_group"]["visible"] is True
    assert result_dict["path_mode_group"]["visible"] is False
    assert result_dict["text_mode_group"]["visible"] is False
    assert result_dict["web_mode_group"]["visible"] is False
    assert result_dict["path_results_group"]["visible"] is False
    assert result_dict["cards_output_group"]["visible"] is True

    assert result_dict["output_dataframe"]["value"] is None
    assert result_dict["progress_html"]["visible"] is False
    assert result_dict["total_cards_number"]["visible"] is False

    # The markdown components should receive bare gr.update() dicts (no value change).
    assert isinstance(result_dict["learning_order_markdown"], dict)
    assert result_dict["learning_order_markdown"].get("__type__") == "update"
    assert len(result_dict["learning_order_markdown"]) == 1

    assert isinstance(result_dict["projects_markdown"], dict)
    assert result_dict["projects_markdown"].get("__type__") == "update"
    assert len(result_dict["projects_markdown"]) == 1


@patch("ankigen_core.ui_logic.gr.Warning")
def test_use_selected_subjects_button_click_none_df(mock_gr_warning):
    """Test use_selected_subjects with None DataFrame input."""
    result_dict = use_selected_subjects(None)
    mock_gr_warning.assert_called_once_with(
        "No subjects available to copy from Learning Path analysis."
    )

    for key in EXPECTED_UI_LOGIC_KEYS_USE_SUBJECTS:
        assert key in result_dict
        assert isinstance(result_dict[key], dict)
        assert result_dict[key].get("__type__") == "update"
        assert len(result_dict[key]) == 1


@patch("ankigen_core.ui_logic.gr.Warning")
def test_use_selected_subjects_button_click_empty_df(mock_gr_warning):
    """Test use_selected_subjects with an empty DataFrame."""
    result_dict = use_selected_subjects(pd.DataFrame())
    mock_gr_warning.assert_called_once_with(
        "No subjects available to copy from Learning Path analysis."
    )
    for key in EXPECTED_UI_LOGIC_KEYS_USE_SUBJECTS:
        assert key in result_dict
        assert isinstance(result_dict[key], dict)
        assert result_dict[key].get("__type__") == "update"
        assert len(result_dict[key]) == 1


@patch("ankigen_core.ui_logic.gr.Error")
def test_use_selected_subjects_button_click_missing_column(mock_gr_error):
    """Test use_selected_subjects with a DataFrame missing the 'Subject' column."""
    result_dict = use_selected_subjects(pd.DataFrame({"WrongColumn": ["data"]}))
    mock_gr_error.assert_called_once_with(
        "Learning path analysis result is missing the 'Subject' column."
    )
    for key in EXPECTED_UI_LOGIC_KEYS_USE_SUBJECTS:
        assert key in result_dict
        assert isinstance(result_dict[key], dict)
        assert result_dict[key].get("__type__") == "update"
        assert len(result_dict[key]) == 1
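

# --- orchestrate_card_generation integration tests ---
# get_orchestrator_mock_inputs builds the keyword arguments that the tests below splat
# into orchestrate_card_generation via **mock_inputs; the keys are assumed to mirror
# the values app.py collects from its UI controls.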


def get_orchestrator_mock_inputs(generation_mode="subject", api_key="sk-test"):
    return {
        "api_key_input": api_key,
        "subject": "Test Subject for Orchestrator",
        "generation_mode": generation_mode,
        "source_text": "Some source text for testing.",
        "url_input": "http://example.com/test-page",
        "model_name": "gpt-test-orchestrator",
        "topic_number": 2,
        "cards_per_topic": 3,
        "preference_prompt": "Test preferences",
        "generate_cloze": False,
    }
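

# Each orchestrator test patches the collaborators used by ankigen_core.card_generator
# (client manager, response cache, structured_output_completion, generate_cards_batch
# where relevant, and the gr module) and drives a single generation mode end to end.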


@patch("ankigen_core.card_generator.generate_cards_batch")
@patch("ankigen_core.card_generator.structured_output_completion")
@patch("ankigen_core.card_generator.OpenAIClientManager")
@patch("ankigen_core.card_generator.ResponseCache")
@patch("ankigen_core.card_generator.gr")
def test_generate_button_click_subject_mode(
    mock_gr, mock_response_cache_class, mock_client_manager_class, mock_soc, mock_gcb
):
    """Test orchestrate_card_generation for 'subject' mode."""
    mock_client_manager_instance = mock_client_manager_class.return_value
    mock_openai_client = MagicMock()
    mock_client_manager_instance.get_client.return_value = mock_openai_client

    mock_cache_instance = mock_response_cache_class.return_value
    mock_cache_instance.get.return_value = None

    mock_inputs = get_orchestrator_mock_inputs(generation_mode="subject")

    mock_topic_response = {
        "topics": [
            {"name": "Topic Alpha", "difficulty": "easy", "description": "First topic"},
            {
                "name": "Topic Beta",
                "difficulty": "medium",
                "description": "Second topic",
            },
        ]
    }

    mock_cards_batch_alpha = [
        Card(
            front=CardFront(question="Q_A1"),
            back=CardBack(answer="A_A1", explanation="E_A1", example="Ex_A1"),
        ),
        Card(
            front=CardFront(question="Q_A2"),
            back=CardBack(answer="A_A2", explanation="E_A2", example="Ex_A2"),
        ),
    ]
    mock_cards_batch_beta = [
        Card(
            front=CardFront(question="Q_B1"),
            back=CardBack(answer="A_B1", explanation="E_B1", example="Ex_B1"),
        ),
    ]

    mock_soc.return_value = mock_topic_response
    mock_gcb.side_effect = [mock_cards_batch_alpha, mock_cards_batch_beta]

    df_result, status_html, count = orchestrate_card_generation(
        client_manager=mock_client_manager_instance,
        cache=mock_cache_instance,
        **mock_inputs,
    )

    mock_client_manager_instance.initialize_client.assert_called_once_with(
        mock_inputs["api_key_input"]
    )

    # The topic breakdown should be requested once via structured_output_completion.
    mock_soc.assert_called_once_with(
        openai_client=mock_openai_client,
        model=mock_inputs["model_name"],
        response_format={"type": "json_object"},
        system_prompt=ANY,
        user_prompt=ANY,
        cache=mock_cache_instance,
    )
    assert mock_inputs["subject"] in mock_soc.call_args[1]["user_prompt"]
    assert str(mock_inputs["topic_number"]) in mock_soc.call_args[1]["user_prompt"]

    # One card batch should be generated per returned topic.
    assert mock_gcb.call_count == 2
    mock_gcb.assert_any_call(
        openai_client=mock_openai_client,
        cache=mock_cache_instance,
        model=mock_inputs["model_name"],
        topic="Topic Alpha",
        num_cards=mock_inputs["cards_per_topic"],
        system_prompt=ANY,
        generate_cloze=False,
    )
    mock_gcb.assert_any_call(
        openai_client=mock_openai_client,
        cache=mock_cache_instance,
        model=mock_inputs["model_name"],
        topic="Topic Beta",
        num_cards=mock_inputs["cards_per_topic"],
        system_prompt=ANY,
        generate_cloze=False,
    )

    # Two cards for Topic Alpha plus one for Topic Beta gives three rows in total.
    assert isinstance(df_result, pd.DataFrame)
    assert len(df_result) == 3
    assert count == 3
    assert "Generation complete!" in status_html
    assert "Total cards generated: 3" in status_html

    assert mock_gr.Info.called


@patch("ankigen_core.card_generator.structured_output_completion")
@patch("ankigen_core.card_generator.OpenAIClientManager")
@patch("ankigen_core.card_generator.ResponseCache")
@patch("ankigen_core.card_generator.gr")
def test_generate_button_click_text_mode(
    mock_gr, mock_response_cache_class, mock_client_manager_class, mock_soc
):
    """Test orchestrate_card_generation for 'text' mode."""
    mock_client_manager_instance = mock_client_manager_class.return_value
    mock_openai_client = MagicMock()
    mock_client_manager_instance.get_client.return_value = mock_openai_client

    mock_cache_instance = mock_response_cache_class.return_value
    mock_cache_instance.get.return_value = None

    mock_inputs = get_orchestrator_mock_inputs(generation_mode="text")

    # In text mode the LLM response contains the cards directly; there is no separate
    # topic-breakdown step.
    mock_card_data_from_text = {
        "cards": [
            {
                "card_type": "basic",
                "front": {"question": "Q_Text1"},
                "back": {
                    "answer": "A_Text1",
                    "explanation": "E_Text1",
                    "example": "Ex_Text1",
                },
                "metadata": {},
            },
            {
                "card_type": "cloze",
                "front": {"question": "{{c1::Q_Text2}}"},
                "back": {
                    "answer": "A_Text2_Full",
                    "explanation": "E_Text2",
                    "example": "Ex_Text2",
                },
                "metadata": {},
            },
        ]
    }
    mock_soc.return_value = mock_card_data_from_text

    df_result, status_html, count = orchestrate_card_generation(
        client_manager=mock_client_manager_instance,
        cache=mock_cache_instance,
        **mock_inputs,
    )

    mock_client_manager_instance.initialize_client.assert_called_once_with(
        mock_inputs["api_key_input"]
    )

    mock_soc.assert_called_once_with(
        openai_client=mock_openai_client,
        model=mock_inputs["model_name"],
        response_format={"type": "json_object"},
        system_prompt=ANY,
        user_prompt=ANY,
        cache=mock_cache_instance,
    )
    assert mock_inputs["source_text"] in mock_soc.call_args[1]["user_prompt"]
    assert str(mock_inputs["cards_per_topic"]) in mock_soc.call_args[1]["user_prompt"]

    assert isinstance(df_result, pd.DataFrame)
    assert len(df_result) == 2
    assert count == 2
    mock_gr.Info.assert_any_call("✅ Generated 2 cards from the provided content.")
    assert "Generation complete!" in status_html
    assert "Total cards generated: 2" in status_html
    assert mock_gr.Info.called


@patch("ankigen_core.card_generator.fetch_webpage_text")
@patch("ankigen_core.card_generator.structured_output_completion")
@patch("ankigen_core.card_generator.OpenAIClientManager")
@patch("ankigen_core.card_generator.ResponseCache")
@patch("ankigen_core.card_generator.gr")
def test_generate_button_click_web_mode(
    mock_gr,
    mock_response_cache_class,
    mock_client_manager_class,
    mock_soc,
    mock_fetch_web,
):
    """Test orchestrate_card_generation for 'web' mode."""
    mock_client_manager_instance = mock_client_manager_class.return_value
    mock_openai_client = MagicMock()
    mock_client_manager_instance.get_client.return_value = mock_openai_client

    mock_cache_instance = mock_response_cache_class.return_value
    mock_cache_instance.get.return_value = None

    mock_inputs = get_orchestrator_mock_inputs(generation_mode="web")
    mock_fetched_text = "This is the text fetched from the website."
    mock_fetch_web.return_value = mock_fetched_text

    mock_card_data_from_web = {
        "cards": [
            {
                "card_type": "basic",
                "front": {"question": "Q_Web1"},
                "back": {
                    "answer": "A_Web1",
                    "explanation": "E_Web1",
                    "example": "Ex_Web1",
                },
                "metadata": {},
            }
        ]
    }
    mock_soc.return_value = mock_card_data_from_web

    df_result, status_html, count = orchestrate_card_generation(
        client_manager=mock_client_manager_instance,
        cache=mock_cache_instance,
        **mock_inputs,
    )
    assert isinstance(df_result, pd.DataFrame)
    assert len(df_result) == 1
    assert count == 1
    mock_gr.Info.assert_any_call(
        f"✅ Successfully fetched text (approx. {len(mock_fetched_text)} chars). Starting AI generation..."
    )
    mock_gr.Info.assert_any_call("✅ Generated 1 cards from the provided content.")
    assert "Generation complete!" in status_html

    # Second run: the web fetch fails, so generation should stop before calling the LLM.
    mock_fetch_web.reset_mock()
    mock_soc.reset_mock()
    mock_gr.reset_mock()
    mock_client_manager_instance.initialize_client.reset_mock()

    fetch_error_message = "Could not connect to host"
    mock_fetch_web.side_effect = ConnectionError(fetch_error_message)

    df_err, html_err, count_err = orchestrate_card_generation(
        client_manager=mock_client_manager_instance,
        cache=mock_cache_instance,
        **mock_inputs,
    )

    mock_gr.Error.assert_called_once_with(
        f"Failed to get content from URL: {fetch_error_message}"
    )
    assert df_err.empty
    assert html_err == "Failed to get content from URL."
    assert count_err == 0
    mock_soc.assert_not_called()


@patch("ankigen_core.card_generator.OpenAIClientManager")
@patch("ankigen_core.card_generator.ResponseCache")
@patch("ankigen_core.card_generator.gr")
def test_generate_button_click_path_mode_error(
    mock_gr,
    mock_response_cache_class,
    mock_client_manager_class,
):
    """Test that 'path' mode calls gr.Error because it is unsupported."""
    mock_client_manager_instance = mock_client_manager_class.return_value
    mock_cache_instance = mock_response_cache_class.return_value
    mock_inputs = get_orchestrator_mock_inputs(generation_mode="path")

    df_err, html_err, count_err = orchestrate_card_generation(
        client_manager=mock_client_manager_instance,
        cache=mock_cache_instance,
        **mock_inputs,
    )

    mock_gr.Error.assert_called_once_with("Unsupported generation mode selected: path")
    assert df_err.empty
    assert html_err == "Unsupported mode."
    assert count_err == 0
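

# The two export tests below patch export_csv / export_deck in this module's namespace
# (where the names are looked up), so the direct calls resolve to the mocks; they check
# the expected call signature rather than the export implementations themselves.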


def test_export_csv_button_click(mocker):
    """Test that export_csv_button click calls the correct core function."""
    mock_export_csv_in_test_module = mocker.patch(
        "tests.integration.test_app_interactions.export_csv"
    )

    sample_df_data = {
        "Index": ["1.1"],
        "Topic": ["T1"],
        "Card_Type": ["basic"],
        "Question": ["Q1"],
        "Answer": ["A1"],
        "Explanation": ["E1"],
        "Example": ["Ex1"],
        "Prerequisites": [[]],
        "Learning_Outcomes": [[]],
        "Common_Misconceptions": [[]],
        "Difficulty": ["easy"],
    }
    mock_ui_dataframe = pd.DataFrame(sample_df_data)
    mock_export_csv_in_test_module.return_value = "/fake/path/export.csv"

    result_path = export_csv(mock_ui_dataframe)

    mock_export_csv_in_test_module.assert_called_once_with(mock_ui_dataframe)
    assert result_path == "/fake/path/export.csv"


def test_export_anki_button_click(mocker):
    """Test that export_anki_button click calls the correct core function."""
    mock_export_deck_in_test_module = mocker.patch(
        "tests.integration.test_app_interactions.export_deck"
    )

    sample_df_data = {
        "Index": ["1.1"],
        "Topic": ["T1"],
        "Card_Type": ["basic"],
        "Question": ["Q1"],
        "Answer": ["A1"],
        "Explanation": ["E1"],
        "Example": ["Ex1"],
        "Prerequisites": [[]],
        "Learning_Outcomes": [[]],
        "Common_Misconceptions": [[]],
        "Difficulty": ["easy"],
    }
    mock_ui_dataframe = pd.DataFrame(sample_df_data)
    mock_subject_input = "My Anki Deck Subject"
    mock_export_deck_in_test_module.return_value = "/fake/path/export.apkg"

    result_path = export_deck(mock_ui_dataframe, mock_subject_input)

    mock_export_deck_in_test_module.assert_called_once_with(
        mock_ui_dataframe, mock_subject_input
    )
    assert result_path == "/fake/path/export.apkg"