Upload folder using huggingface_hub
- conftest.py +7 -1
- run_backend_ops.py +2 -1
- run_bench_tests.py +2 -1
conftest.py
CHANGED
@@ -12,8 +12,14 @@ from appium import webdriver
 from utils import options, write_qdc_log
 
 
-@pytest.fixture(scope="session", autouse=True)
+@pytest.fixture(scope="session")
 def driver():
+    """Appium WebDriver fixture (only used by scorecard tests).
+
+    For most Linux tests, direct SSH commands are used instead.
+    This fixture is not auto-initialized to avoid connection errors
+    when the Appium server isn't needed.
+    """
     return webdriver.Remote(command_executor="http://127.0.0.1:4723/wd/hub", options=options)
 
 
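Note: with autouse removed, an Appium session is created only for tests that name the fixture explicitly. A minimal sketch of a test that opts in (the test name and assertion are illustrative, not from this repo):

def test_scorecard_smoke(driver):
    # `driver` is the session-scoped webdriver.Remote from conftest.py;
    # pytest creates it lazily, the first time a test requests it.
    assert driver.session_id is not None

Tests that never list `driver` as a parameter now run even when no Appium server is listening on 127.0.0.1:4723.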
run_backend_ops.py
CHANGED
@@ -19,7 +19,8 @@ from utils import BIN_PATH, CMD_PREFIX, push_bundle_if_needed, run_shell_command
 
 
 @pytest.fixture(scope="session", autouse=True)
-def install(
+def install():
+    """Push llama_cpp_bundle to the device if needed."""
     push_bundle_if_needed(f"{BIN_PATH}/test-backend-ops")
 
 
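Note: `push_bundle_if_needed` comes from utils, which is not part of this diff. A rough sketch of the skip-if-present pattern its name suggests, assuming an SSH-reachable device as described in conftest.py (the target, paths, and commands below are placeholders, not the real implementation):

import subprocess

DEVICE = "root@device"             # placeholder SSH target
LOCAL_BUNDLE = "llama_cpp_bundle"  # placeholder local bundle directory

def push_bundle_if_needed(binary: str) -> None:
    # Probe for the binary over SSH; copy the bundle only when it is
    # missing, so repeated test sessions skip the slow transfer.
    probe = subprocess.run(["ssh", DEVICE, f"test -x {binary}"])
    if probe.returncode != 0:
        subprocess.run(["scp", "-r", LOCAL_BUNDLE, f"{DEVICE}:"], check=True)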
run_bench_tests.py
CHANGED
@@ -26,7 +26,8 @@ CLI_OPTS = "--batch-size 128 -n 128 -no-cnv --seed 42"
 
 
 @pytest.fixture(scope="session", autouse=True)
-def install(
+def install():
+    """Push llama_cpp_bundle to the device and download model if needed."""
     push_bundle_if_needed(f"{BIN_PATH}/llama-cli")
 
     # Skip model download if already present