mmmay0722 committed on
Commit
51a1a1d
·
1 Parent(s): d75cc85

feat: add multilingual support with English/Chinese UI switching

Browse files

- Add a report language parameter (en|zh) to switch the UI between English and Chinese
- Implement a bilingual Gradio interface with language-toggle support (usage sketch below)
- Configure default test cases for navigation functionality testing
- Update entry scripts with English prompts and instructions
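
The UI language itself is selected at startup through a `GRADIO_LANGUAGE` environment variable read by the updated `app.py` (see the diff below). A minimal launch sketch, assuming the app is started directly via `app.py`; the variable name and supported values come from the diff, everything else is illustrative:

```python
# Minimal sketch: pick the Gradio UI language before launching app.py.
# Values outside {"zh-CN", "en-US"} fall back to "en-US" with a warning.
import os
import subprocess
import sys

os.environ["GRADIO_LANGUAGE"] = "zh-CN"   # or "en-US" (the default)
subprocess.run([sys.executable, "app.py"], check=True)
```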

Dockerfile CHANGED
@@ -1,9 +1,9 @@
1
  FROM mcr.microsoft.com/playwright/python:v1.52.0-noble
2
 
3
- # Set working directory
4
  WORKDIR /app
5
 
6
- # Install Node.js, npm, and necessary tools
7
  RUN apt-get update && apt-get install -y \
8
  curl \
9
  unzip \
@@ -12,13 +12,13 @@ RUN apt-get update && apt-get install -y \
12
  && apt-get install -y nodejs \
13
  && rm -rf /var/lib/apt/lists/*
14
 
15
- # Optimize pip configuration and network settings
16
  RUN pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple && \
17
  pip config set global.trusted-host mirrors.tuna.tsinghua.edu.cn && \
18
  pip config set global.timeout 300 && \
19
  pip config set global.retries 5
20
 
21
- # 1. Install nuclei first
22
  RUN ARCH=$(dpkg --print-architecture) && \
23
  if [ "$ARCH" = "amd64" ]; then \
24
  NUCLEI_ARCH="amd64"; \
@@ -35,19 +35,19 @@ RUN ARCH=$(dpkg --print-architecture) && \
35
  chmod +x /usr/local/bin/nuclei && \
36
  rm -rf /tmp/nuclei /tmp/nuclei.zip
37
 
38
- # 2. Copy Python dependency file and install
39
  COPY requirements.txt /app/
40
  RUN pip install --no-cache-dir --default-timeout=300 -r requirements.txt
41
 
42
- # 3. Copy Node.js dependency file and install
43
  COPY package.json /app/
44
  RUN npm install
45
 
46
- # 4. Copy project files
47
  COPY . /app
48
 
49
- # Update nuclei templates
50
  RUN nuclei -ut -v
51
 
52
- # Set to run webqa-agent
53
  CMD ["python", "webqa-agent.py"]
 
1
  FROM mcr.microsoft.com/playwright/python:v1.52.0-noble
2
 
3
+ # Set working directory
4
  WORKDIR /app
5
 
6
+ # Install Node.js, npm, and necessary tools
7
  RUN apt-get update && apt-get install -y \
8
  curl \
9
  unzip \
 
12
  && apt-get install -y nodejs \
13
  && rm -rf /var/lib/apt/lists/*
14
 
15
+ # Optimize pip configuration and network settings
16
  RUN pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple && \
17
  pip config set global.trusted-host mirrors.tuna.tsinghua.edu.cn && \
18
  pip config set global.timeout 300 && \
19
  pip config set global.retries 5
20
 
21
+ # 1. Install nuclei first
22
  RUN ARCH=$(dpkg --print-architecture) && \
23
  if [ "$ARCH" = "amd64" ]; then \
24
  NUCLEI_ARCH="amd64"; \
 
35
  chmod +x /usr/local/bin/nuclei && \
36
  rm -rf /tmp/nuclei /tmp/nuclei.zip
37
 
38
+ # 2. Copy Python dependency file and install
39
  COPY requirements.txt /app/
40
  RUN pip install --no-cache-dir --default-timeout=300 -r requirements.txt
41
 
42
+ # 3. Copy Node.js dependency file and install
43
  COPY package.json /app/
44
  RUN npm install
45
 
46
+ # 4. Copy project files
47
  COPY . /app
48
 
49
+ # Update nuclei templates
50
  RUN nuclei -ut -v
51
 
52
+ # Set to run webqa-agent
53
  CMD ["python", "webqa-agent.py"]
README_zh-CN.md CHANGED
@@ -139,7 +139,6 @@ browser_config:
139
  headless: False # Docker environment automatically overrides this to True
140
  language: zh-CN
141
  cookies: []
142
-
143
  ```
144
 
145
  When configuring and running tests, please note the following important points:
 
139
  headless: False # Docker environment automatically overrides this to True
140
  language: zh-CN
141
  cookies: []
 
142
  ```
143
 
144
  When configuring and running tests, please note the following important points:
app.py CHANGED
@@ -1,6 +1,6 @@
1
  #!/usr/bin/env python3
2
  """
3
- WebQA Agent Gradio Launch Script
4
  """
5
 
6
  import sys
@@ -8,20 +8,36 @@ import os
8
  import subprocess
9
  import asyncio
10
 
11
- # Add project path to Python path
12
- sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
13
 
14
- # Import and launch Gradio application
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  if __name__ == "__main__":
16
  try:
17
- from demo_gradio import create_gradio_interface, queue_manager, process_queue
18
  import threading
19
  from playwright.async_api import async_playwright, Error as PlaywrightError
20
 
21
- print("๐Ÿš€ ๅฏๅŠจWebQA Agent Gradio็•Œ้ข...")
22
- print("๐Ÿ“ฑ ็•Œ้ขๅฐ†ๅœจ http://localhost:7860 ๅฏๅŠจ")
23
- print("โš ๏ธ ๆณจๆ„๏ผš่ฏท็กฎไฟๅทฒๅฎ‰่ฃ…ๆ‰€ๆœ‰ไพ่ต–ๅŒ… (pip install -r requirements.txt)")
24
- print("๐Ÿ” ๆญฃๅœจๆฃ€ๆŸฅ Playwright ๆต่งˆๅ™จไพ่ต–...")
 
 
25
 
26
  async def _check_playwright():
27
  try:
@@ -36,53 +52,53 @@ if __name__ == "__main__":
36
 
37
  ok = asyncio.run(_check_playwright())
38
  if not ok:
39
- print("โš ๏ธ ๆฃ€ๆต‹ๅˆฐ Playwright ๆต่งˆๅ™จๆœชๅฎ‰่ฃ…๏ผŒๆญฃๅœจ่‡ชๅŠจๅฎ‰่ฃ…...")
40
  try:
41
  cmd = [sys.executable, "-m", "playwright", "install"]
42
  result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
43
  print(result.stdout)
44
  except Exception as e:
45
- print(f"โŒ ่‡ชๅŠจๅฎ‰่ฃ…ๅคฑ่ดฅ๏ผš{e}\n่ฏทๆ‰‹ๅŠจๆ‰ง่กŒ๏ผšplaywright install")
46
  sys.exit(1)
47
 
48
- # Verify again after installation
49
  ok_after = asyncio.run(_check_playwright())
50
  if not ok_after:
51
- print("โŒ Playwright ๆต่งˆๅ™จไปไธๅฏ็”จ๏ผŒ่ฏทๆ‰‹ๅŠจๆ‰ง่กŒ๏ผšplaywright install")
52
  sys.exit(1)
53
- print("โœ… Playwright ๆต่งˆๅ™จๅฏ็”จ")
54
 
55
- # Start queue processor
56
  def run_queue_processor():
57
- """ๅœจๅŽๅฐ็บฟ็จ‹ไธญ่ฟ่กŒ้˜Ÿๅˆ—ๅค„็†ๅ™จ"""
58
  loop = asyncio.new_event_loop()
59
  asyncio.set_event_loop(loop)
60
  loop.run_until_complete(process_queue())
61
 
62
  queue_thread = threading.Thread(target=run_queue_processor, daemon=True)
63
  queue_thread.start()
64
- print("โœ… ไปปๅŠก้˜Ÿๅˆ—ๅค„็†ๅ™จๅทฒๅฏๅŠจ")
65
 
66
- # Create and launch Gradio application
67
- app = create_gradio_interface()
68
- print("โœ… Gradio็•Œ้ขๅทฒๅˆ›ๅปบ")
69
 
70
  app.launch(
71
  server_name="0.0.0.0",
72
  server_port=7860,
73
  share=False,
74
  show_error=True,
75
- inbrowser=True # Auto open browser
76
  )
77
 
78
  except ImportError as e:
79
- print(f"โŒ ๅฏผๅ…ฅ้”™่ฏฏ: {e}")
80
- print("่ฏท็กฎไฟๅทฒๅฎ‰่ฃ…ๆ‰€ๆœ‰ไพ่ต–ๅŒ…:")
81
  print("pip install -r requirements.txt")
82
  sys.exit(1)
83
 
84
  except Exception as e:
85
- print(f"โŒ ๅฏๅŠจๅคฑ่ดฅ: {e}")
86
  import traceback
87
  traceback.print_exc()
88
  sys.exit(1)
 
1
  #!/usr/bin/env python3
2
  """
3
+ WebQA Agent Gradio Launch Script
4
  """
5
 
6
  import sys
 
8
  import subprocess
9
  import asyncio
10
 
11
+ # Add project path to Python path
12
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
13
 
14
+ # Language configuration from environment variable
15
+ def get_gradio_language():
16
+ """Get Gradio interface language from environment variable with validation"""
17
+ supported_languages = ["zh-CN", "en-US"]
18
+ env_lang = os.getenv("GRADIO_LANGUAGE", "en-US") # Default to English
19
+
20
+ if env_lang in supported_languages:
21
+ return env_lang
22
+ else:
23
+ print(f"โš ๏ธ Warning: Unsupported language '{env_lang}', falling back to 'en-US'")
24
+ return "en-US"
25
+
26
+ GRADIO_LANGUAGE = get_gradio_language()
27
+
28
+ # Import and launch Gradio application
29
  if __name__ == "__main__":
30
  try:
31
+ from app_gradio.demo_gradio import create_gradio_interface, queue_manager, process_queue
32
  import threading
33
  from playwright.async_api import async_playwright, Error as PlaywrightError
34
 
35
+ print("๐Ÿš€ Starting WebQA Agent Gradio interface...")
36
+ print("๐Ÿ“ฑ Interface will start at http://localhost:7860")
37
+ print(f"๐ŸŒ Interface language: {GRADIO_LANGUAGE}")
38
+ print("๐Ÿ’ก Tip: Set environment variable GRADIO_LANGUAGE=en-US for English or GRADIO_LANGUAGE=zh-CN for Chinese")
39
+ print("โš ๏ธ Note: Please ensure all dependencies are installed (pip install -r requirements.txt)")
40
+ print("๐Ÿ” Checking Playwright browser dependencies...")
41
 
42
  async def _check_playwright():
43
  try:
 
52
 
53
  ok = asyncio.run(_check_playwright())
54
  if not ok:
55
+ print("โš ๏ธ Detected Playwright browsers not installed, installing automatically...")
56
  try:
57
  cmd = [sys.executable, "-m", "playwright", "install"]
58
  result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
59
  print(result.stdout)
60
  except Exception as e:
61
+ print(f"โŒ Automatic installation failed: {e}\nPlease run manually: playwright install")
62
  sys.exit(1)
63
 
64
+ # Verify again after installation
65
  ok_after = asyncio.run(_check_playwright())
66
  if not ok_after:
67
+ print("โŒ Playwright browsers still unavailable, please run manually: playwright install")
68
  sys.exit(1)
69
+ print("โœ… Playwright browsers available")
70
 
71
+ # Start queue processor
72
  def run_queue_processor():
73
+ """Run queue processor in background thread"""
74
  loop = asyncio.new_event_loop()
75
  asyncio.set_event_loop(loop)
76
  loop.run_until_complete(process_queue())
77
 
78
  queue_thread = threading.Thread(target=run_queue_processor, daemon=True)
79
  queue_thread.start()
80
+ print("โœ… Task queue processor started")
81
 
82
+ # Create and launch Gradio application with language configuration
83
+ app = create_gradio_interface(language=GRADIO_LANGUAGE)
84
+ print(f"โœ… Gradio interface created with language: {GRADIO_LANGUAGE}")
85
 
86
  app.launch(
87
  server_name="0.0.0.0",
88
  server_port=7860,
89
  share=False,
90
  show_error=True,
91
+ inbrowser=True # Auto open browser
92
  )
93
 
94
  except ImportError as e:
95
+ print(f"โŒ Import error: {e}")
96
+ print("Please ensure all dependencies are installed:")
97
  print("pip install -r requirements.txt")
98
  sys.exit(1)
99
 
100
  except Exception as e:
101
+ print(f"โŒ Startup failed: {e}")
102
  import traceback
103
  traceback.print_exc()
104
  sys.exit(1)
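
The same language choice also drives the language of the generated report: in the renamed `app_gradio/demo_gradio.py` below, `create_config_dict()` stores it under a `report` key and `run_webqa_test()` forwards it to `ParallelMode.run()` as `report_cfg`. A minimal sketch of that wiring, with the field names taken from the diff and the values purely illustrative:

```python
# Sketch of the report-language plumbing added in this commit (values illustrative).
interface_language = "en-US"                              # chosen by the Gradio layer
config = {"report": {"language": interface_language}}     # set in create_config_dict(...)

# run_webqa_test() later passes it straight through to the executor, roughly:
#   parallel_mode.run(..., report_cfg=config.get("report", {"language": interface_language}))
print(config["report"]["language"])
```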
demo_gradio.py โ†’ app_gradio/demo_gradio.py RENAMED
@@ -17,15 +17,50 @@ import re
17
  import gradio as gr
18
  import yaml
19
 
20
- # Import project modules
21
  from webqa_agent.executor import ParallelMode
22
 
23
- # Simple submission history (in-memory storage for current session only)
24
  submission_history: list = []
25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
  class QueueManager:
28
- """ไปปๅŠก้˜Ÿๅˆ—็ฎก็†ๅ™จ๏ผŒ็กฎไฟๅŒๆ—ถๅชๆœ‰ไธ€ไธชไปปๅŠกๅœจๆ‰ง่กŒ"""
29
 
30
  def __init__(self):
31
  self.current_task: Optional[str] = None
@@ -34,7 +69,7 @@ class QueueManager:
34
  self.lock = Lock()
35
 
36
  def add_task(self, task_id: str, user_info: Dict) -> int:
37
- """ๆทปๅŠ ไปปๅŠกๅˆฐ้˜Ÿๅˆ—๏ผŒ่ฟ”ๅ›ž้˜Ÿๅˆ—ไฝ็ฝฎ"""
38
  with self.lock:
39
  self.task_status[task_id] = {
40
  "status": "queued",
@@ -47,7 +82,7 @@ class QueueManager:
47
  return self.task_queue.qsize()
48
 
49
  def get_next_task(self) -> Optional[str]:
50
- """่Žทๅ–ไธ‹ไธ€ไธชๅพ…ๆ‰ง่กŒ็š„ไปปๅŠก"""
51
  with self.lock:
52
  if self.current_task is None and not self.task_queue.empty():
53
  task_id = self.task_queue.get()
@@ -58,7 +93,7 @@ class QueueManager:
58
  return None
59
 
60
  def complete_task(self, task_id: str, result: Any = None, error: Any = None):
61
- """ๆ ‡่ฎฐไปปๅŠกๅฎŒๆˆ"""
62
  with self.lock:
63
  if task_id in self.task_status:
64
  self.task_status[task_id]["status"] = "completed" if result else "failed"
@@ -69,49 +104,47 @@ class QueueManager:
69
  self.current_task = None
70
 
71
  def get_queue_position(self, task_id: str) -> int:
72
- """่Žทๅ–ไปปๅŠกๅœจ้˜Ÿๅˆ—ไธญ็š„ไฝ็ฝฎ"""
73
  with self.lock:
74
  if task_id == self.current_task:
75
- return 0 # Currently executing
76
 
77
  queue_list = list(self.task_queue.queue)
78
  try:
79
  return queue_list.index(task_id) + 1
80
  except ValueError:
81
- return -1 # Task not in queue
82
 
83
  def get_task_status(self, task_id: str) -> Dict:
84
- """่Žทๅ–ไปปๅŠก็Šถๆ€"""
85
  with self.lock:
86
  return self.task_status.get(task_id, {"status": "not_found"})
87
 
88
 
89
- # Global queue manager
90
  queue_manager = QueueManager()
91
 
92
 
93
- def validate_llm_config(api_key: str, base_url: str, model: str) -> Tuple[bool, str]:
94
- """้ชŒ่ฏLLM้…็ฝฎ"""
95
  if not api_key.strip():
96
- return False, "API Keyไธ่ƒฝไธบ็ฉบ"
97
 
98
  if not base_url.strip():
99
- return False, "Base URLไธ่ƒฝไธบ็ฉบ"
100
 
101
  if not model.strip():
102
- return False, "ๆจกๅž‹ๅ็งฐไธ่ƒฝไธบ็ฉบ"
103
 
104
- # Simple URL format check
105
  if not (base_url.startswith("http://") or base_url.startswith("https://")):
106
- return False, "Base URLๆ ผๅผไธๆญฃ็กฎ๏ผŒๅบ”ไปฅhttp://ๆˆ–https://ๅผ€ๅคด"
107
 
108
- return True, "้…็ฝฎ้ชŒ่ฏ้€š่ฟ‡"
109
 
110
 
111
  def create_config_dict(
112
  url: str,
113
- # description: str,
114
- # max_concurrent_tests: int,
115
  function_test_enabled: bool,
116
  function_test_type: str,
117
  business_objectives: str,
@@ -120,18 +153,14 @@ def create_config_dict(
120
  security_test_enabled: bool,
121
  api_key: str,
122
  base_url: str,
123
- model: str
124
- # viewport_width: int,
125
- # viewport_height: int,
126
- # headless: bool,
127
- # language: str
128
  ) -> Dict[str, Any]:
129
- """ๅˆ›ๅปบ้…็ฝฎๅญ—ๅ…ธ"""
130
  config = {
131
  "target": {
132
  "url": url,
133
  "description": ""
134
- # "max_concurrent_tests": max_concurrent_tests
135
  },
136
  "test_config": {
137
  "function_test": {
@@ -155,6 +184,9 @@ def create_config_dict(
155
  "base_url": base_url,
156
  "temperature": 0.1
157
  },
 
 
 
158
  "browser_config": {
159
  "viewport": {"width": 1280, "height": 720},
160
  "headless": True,
@@ -167,13 +199,13 @@ def create_config_dict(
167
 
168
 
169
  def build_test_configurations(config: Dict[str, Any]) -> list:
170
- """ๆ นๆฎ้…็ฝฎๆž„ๅปบๆต‹่ฏ•้…็ฝฎๅˆ—่กจ"""
171
  tests = []
172
  tconf = config.get("test_config", {})
173
 
174
  base_browser = {
175
  "viewport": config.get("browser_config", {}).get("viewport", {"width": 1280, "height": 720}),
176
- "headless": True, # Web็•Œ้ขๅผบๅˆถheadless
177
  }
178
 
179
  # function test
@@ -181,7 +213,6 @@ def build_test_configurations(config: Dict[str, Any]) -> list:
181
  if tconf["function_test"].get("type") == "ai":
182
  tests.append({
183
  "test_type": "ui_agent_langgraph",
184
- "test_name": "ๆ™บ่ƒฝๅŠŸ่ƒฝๆต‹่ฏ•",
185
  "enabled": True,
186
  "browser_config": base_browser,
187
  "test_specific_config": {
@@ -192,15 +223,7 @@ def build_test_configurations(config: Dict[str, Any]) -> list:
192
  else:
193
  tests += [
194
  {
195
- "test_type": "button_test",
196
- "test_name": "้ๅކๆต‹่ฏ•",
197
- "enabled": True,
198
- "browser_config": base_browser,
199
- "test_specific_config": {},
200
- },
201
- {
202
- "test_type": "web_basic_check",
203
- "test_name": "ๆŠ€ๆœฏๅฅๅบทๅบฆๆฃ€ๆŸฅ",
204
  "enabled": True,
205
  "browser_config": base_browser,
206
  "test_specific_config": {},
@@ -211,7 +234,6 @@ def build_test_configurations(config: Dict[str, Any]) -> list:
211
  if tconf.get("ux_test", {}).get("enabled"):
212
  tests.append({
213
  "test_type": "ux_test",
214
- "test_name": "็”จๆˆทไฝ“้ชŒๆต‹่ฏ•",
215
  "enabled": True,
216
  "browser_config": base_browser,
217
  "test_specific_config": {},
@@ -221,7 +243,6 @@ def build_test_configurations(config: Dict[str, Any]) -> list:
221
  if tconf.get("performance_test", {}).get("enabled"):
222
  tests.append({
223
  "test_type": "performance",
224
- "test_name": "ๆ€ง่ƒฝๆต‹่ฏ•",
225
  "enabled": True,
226
  "browser_config": base_browser,
227
  "test_specific_config": {},
@@ -231,7 +252,6 @@ def build_test_configurations(config: Dict[str, Any]) -> list:
231
  if tconf.get("security_test", {}).get("enabled"):
232
  tests.append({
233
  "test_type": "security",
234
- "test_name": "ๅฎ‰ๅ…จๆต‹่ฏ•",
235
  "enabled": True,
236
  "browser_config": base_browser,
237
  "test_specific_config": {},
@@ -240,10 +260,10 @@ def build_test_configurations(config: Dict[str, Any]) -> list:
240
  return tests
241
 
242
 
243
- async def run_webqa_test(config: Dict[str, Any]) -> Tuple[Optional[str], Optional[str], Optional[str]]:
244
- """่ฟ่กŒWebQAๆต‹่ฏ•"""
245
  try:
246
- # Validate LLM configuration
247
  llm_config = {
248
  "api": "openai",
249
  "model": config["llm_config"]["model"],
@@ -252,36 +272,34 @@ async def run_webqa_test(config: Dict[str, Any]) -> Tuple[Optional[str], Optiona
252
  "temperature": config["llm_config"]["temperature"],
253
  }
254
 
255
- # Build test configurations
256
  test_configurations = build_test_configurations(config)
257
 
258
  if not test_configurations:
259
- return None, None, "Error: No test types enabled"
260
 
261
  target_url = config["target"]["url"]
262
- # max_concurrent_tests = config["target"].get("max_concurrent_tests", 2)
263
  max_concurrent_tests = 1
264
-
265
- # Execute tests
266
  parallel_mode = ParallelMode([], max_concurrent_tests=max_concurrent_tests)
267
  results, report_path, html_report_path, result_count = await parallel_mode.run(
268
  url=target_url,
269
  llm_config=llm_config,
270
  test_configurations=test_configurations,
271
- log_cfg=config.get("log", {"level": "info"})
 
272
  )
273
 
274
  return html_report_path, report_path, None
275
 
276
  except Exception as e:
277
- error_msg = f"ๆต‹่ฏ•ๆ‰ง่กŒๅคฑ่ดฅ: {str(e)}\n{traceback.format_exc()}"
278
  return None, None, error_msg
279
 
280
 
281
  def submit_test(
282
  url: str,
283
- # description: str,
284
- # max_concurrent_tests: int,
285
  function_test_enabled: bool,
286
  function_test_type: str,
287
  business_objectives: str,
@@ -290,51 +308,49 @@ def submit_test(
290
  security_test_enabled: bool,
291
  api_key: str,
292
  base_url: str,
293
- model: str
294
- # viewport_width: int,
295
- # viewport_height: int,
296
- # headless: bool,
297
- # language: str
298
  ) -> Tuple[str, str, bool]:
299
- """ๆไบคๆต‹่ฏ•ไปปๅŠก๏ผŒ่ฟ”ๅ›ž(็Šถๆ€ๆถˆๆฏ, ไปปๅŠกID, ๆ˜ฏๅฆๆˆๅŠŸ)"""
300
 
301
- # Basic validation
302
  if not url.strip():
303
- return "โŒ ้”™่ฏฏ๏ผš็›ฎๆ ‡URLไธ่ƒฝไธบ็ฉบ", "", False
304
 
305
- # Validate at least one test is enabled
306
  if not any([function_test_enabled, ux_test_enabled, performance_test_enabled, security_test_enabled]):
307
- return "โŒ ้”™่ฏฏ๏ผš่‡ณๅฐ‘้œ€่ฆๅฏ็”จไธ€ไธชๆต‹่ฏ•็ฑปๅž‹", "", False
308
 
309
- # If function test is enabled but no business objectives set
310
  if function_test_enabled and function_test_type == "ai" and not business_objectives.strip():
311
- return "โŒ ้”™่ฏฏ๏ผšAIๅŠŸ่ƒฝๆต‹่ฏ•้œ€่ฆ่ฎพ็ฝฎไธšๅŠก็›ฎๆ ‡", "", False
312
 
313
- # Validate LLM configuration
314
- valid, msg = validate_llm_config(api_key, base_url, model)
315
  if not valid:
316
- return f"โŒ ้”™่ฏฏ๏ผš{msg}", "", False
317
 
318
- # Create configuration
319
  config = create_config_dict(
320
  url,
321
  function_test_enabled, function_test_type, business_objectives,
322
  ux_test_enabled, performance_test_enabled, security_test_enabled,
323
- api_key, base_url, model
 
324
  )
325
 
326
- # Generate task ID
327
  task_id = str(uuid.uuid4())
328
 
329
- # Add to queue
330
- user_info = {"config": config, "submitted_at": datetime.now()}
331
  position = queue_manager.add_task(task_id, user_info)
332
 
333
- status_msg = f"โœ… ไปปๅŠกๅทฒๆไบค๏ผ\nไปปๅŠกID: {task_id}\nๅฝ“ๅ‰้˜Ÿๅˆ—ไฝ็ฝฎ: {position}"
334
  if position > 1:
335
- status_msg += f"\nโณ ่ฏท่€ๅฟƒ็ญ‰ๅพ…๏ผŒๅ‰้ข่ฟ˜ๆœ‰ {position-1} ไธชไปปๅŠกๅœจๆŽ’้˜Ÿ"
336
 
337
- # Record submission history
338
  submission_history.append({
339
  "task_id": task_id,
340
  "url": url,
@@ -349,12 +365,12 @@ def submit_test(
349
  return status_msg, task_id, True
350
 
351
 
352
- def check_task_status(task_id: str) -> Tuple[str, str, Any]:
353
- """ๆฃ€ๆŸฅไปปๅŠก็Šถๆ€"""
354
  if not task_id.strip():
355
  return (
356
- "่ฏท่พ“ๅ…ฅไปปๅŠกID",
357
- "<div style='text-align: center; padding: 50px; color: #888;'>๐Ÿ“„ ่ฏทๅ…ˆ่พ“ๅ…ฅไปปๅŠกIDๅนถๆŸฅ่ฏข็Šถๆ€</div>",
358
  gr.update(visible=False, value=None),
359
  )
360
 
@@ -362,35 +378,35 @@ def check_task_status(task_id: str) -> Tuple[str, str, Any]:
362
 
363
  if status["status"] == "not_found":
364
  return (
365
- "โŒ ไปปๅŠกไธๅญ˜ๅœจ",
366
- "<div style='text-align: center; padding: 50px; color: #ff6b6b;'>โŒ ไปปๅŠกไธๅญ˜ๅœจ๏ผŒ่ฏทๆฃ€ๆŸฅไปปๅŠกIDๆ˜ฏๅฆๆญฃ็กฎ</div>",
367
  gr.update(visible=False, value=None),
368
  )
369
 
370
  if status["status"] == "queued":
371
  position = queue_manager.get_queue_position(task_id)
372
  return (
373
- f"โณ ไปปๅŠกๆŽ’้˜Ÿไธญ๏ผŒๅฝ“ๅ‰ไฝ็ฝฎ: {position}",
374
- "<div style='text-align: center; padding: 50px; color: #ffa500;'>โณ ไปปๅŠกๆญฃๅœจๆŽ’้˜Ÿไธญ๏ผŒ่ฏท็จๅŽๅ†ๆŸฅ่ฏข</div>",
375
  gr.update(visible=False, value=None),
376
  )
377
 
378
  if status["status"] == "running":
379
  return (
380
- "๐Ÿš€ ไปปๅŠกๆญฃๅœจๆ‰ง่กŒไธญ๏ผŒ่ฏท็จๅ€™...",
381
- "<div style='text-align: center; padding: 50px; color: #4dabf7;'>๐Ÿš€ ไปปๅŠกๆญฃๅœจๆ‰ง่กŒไธญ๏ผŒ่ฏท็จๅŽๅ†ๆŸฅ่ฏข็ป“ๆžœ</div>",
382
  gr.update(visible=False, value=None),
383
  )
384
 
385
  if status["status"] == "completed":
386
  result = status.get("result")
387
- if result and result[0]: # html_report_path exists
388
- # Read HTML report content
389
  try:
390
  with open(result[0], 'r', encoding='utf-8') as f:
391
  html_content = f.read()
392
- # Wrap the report in an iframe to isolate its styles and avoid affecting the outer layout
393
- # Inline rendering, remove inner and horizontal scrolling
394
  content = html_content
395
  m = re.search(r"<head[^>]*>", content, flags=re.I)
396
  inject_style = (
@@ -410,49 +426,50 @@ def check_task_status(task_id: str) -> Tuple[str, str, Any]:
410
  f"srcdoc=\"{escaped}\"></iframe>"
411
  )
412
  return (
413
- f"โœ… ไปปๅŠกๆ‰ง่กŒๅฎŒๆˆ๏ผ\nๆŠฅๅ‘Š่ทฏๅพ„: {result[0]}",
414
  iframe_html,
415
  gr.update(visible=True, value=result[0]),
416
  )
417
  except Exception as e:
418
  return (
419
- f"โœ… ไปปๅŠกๆ‰ง่กŒๅฎŒๆˆ๏ผŒไฝ†่ฏปๅ–ๆŠฅๅ‘Šๅคฑ่ดฅ: {str(e)}\nๆŠฅๅ‘Š่ทฏๅพ„: {result[0]}",
420
- f"<div style='text-align: center; padding: 50px; color: #ff6b6b;'><p>โŒ ๆ— ๆณ•่ฏปๅ–HTMLๆŠฅๅ‘Šๆ–‡ไปถ</p><p>ๆŠฅๅ‘Š่ทฏๅพ„๏ผš{result[0]}</p><p>้”™่ฏฏไฟกๆฏ๏ผš{str(e)}</p></div>",
421
  gr.update(visible=True, value=result[0]),
422
  )
423
  else:
424
  return (
425
- "โœ… ไปปๅŠกๆ‰ง่กŒๅฎŒๆˆ๏ผŒไฝ†ๆœช็”ŸๆˆHTMLๆŠฅๅ‘Š",
426
- "<div style='text-align: center; padding: 50px; color: #ffa500;'>โš ๏ธ ๆต‹่ฏ•ๆ‰ง่กŒๅฎŒๆˆ๏ผŒไฝ†ๆœช็”ŸๆˆHTMLๆŠฅๅ‘Š</div>",
427
  gr.update(visible=False, value=None),
428
  )
429
 
430
  if status["status"] == "failed":
431
- error = status.get("error", "Unknown error")
432
  return (
433
- f"โŒ ไปปๅŠกๆ‰ง่กŒๅคฑ่ดฅ: {error}",
434
- f"<div style='text-align: center; padding: 50px; color: #ff6b6b;'><p>โŒ ไปปๅŠกๆ‰ง่กŒๅคฑ่ดฅ</p><p>้”™่ฏฏไฟกๆฏ๏ผš{error}</p></div>",
435
  gr.update(visible=False, value=None),
436
  )
437
 
438
  return (
439
- "โ“ ๆœช็Ÿฅ็Šถๆ€",
440
- "<div style='text-align: center; padding: 50px; color: #888;'>โ“ ๆœช็Ÿฅ็Šถๆ€</div>",
441
  gr.update(visible=False, value=None),
442
  )
443
 
444
 
445
  async def process_queue():
446
- """ๅค„็†้˜Ÿๅˆ—ไธญ็š„ไปปๅŠก"""
447
  while True:
448
  task_id = queue_manager.get_next_task()
449
  if task_id:
450
  try:
451
  task_status = queue_manager.get_task_status(task_id)
452
  config = task_status["user_info"]["config"]
 
453
 
454
- # Execute test
455
- html_report_path, report_path, error = await run_webqa_test(config)
456
 
457
  if error:
458
  queue_manager.complete_task(task_id, error=error)
@@ -462,48 +479,70 @@ async def process_queue():
462
  except Exception as e:
463
  queue_manager.complete_task(task_id, error=str(e))
464
 
465
- await asyncio.sleep(1) # Avoid busy waiting
466
 
467
 
468
- def create_gradio_interface():
469
- """ๅˆ›ๅปบGradio็•Œ้ข"""
470
 
471
- # Custom CSS styles
472
  custom_css = """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
473
  #html-report { border: 1px solid #e1e5e9; border-radius: 8px; padding: 0; background: #fff; }
474
  #html-report iframe { width: 100%; height: 1800px; border: none; overflow: hidden; }
475
 
476
  .gradio-container { max-width: 1500px !important; margin: 0 auto !important; width: 100% !important; }
477
 
478
- /* Prevent layout shrinking */
479
  .tab-nav {
480
  position: sticky;
481
  top: 0;
482
  z-index: 100;
483
  }
484
 
485
- /* Improve form layout */
486
  .form-group {
487
  margin-bottom: 1rem;
488
  }
489
 
490
- /* Ensure task status area doesn't shrink */
491
  .task-status-container {
492
  min-height: 400px;
493
  }
494
 
495
- /* Remove password field hint styles */
496
  input[type="password"] {
497
  background-color: #fff !important;
498
  }
499
 
500
- /* Top GitHub CTA button */
501
  .gh-cta-wrap { text-align: right; padding-top: 16px; }
502
  .gh-cta {
503
  display: inline-block;
504
  padding: 10px 16px;
505
  border-radius: 8px;
506
- background: linear-gradient(90deg,#2563eb,#7c3aed); /* Blue-purple gradient, more eye-catching */
507
  color: #fff !important;
508
  text-decoration: none !important;
509
  font-weight: 600;
@@ -513,16 +552,16 @@ def create_gradio_interface():
513
  }
514
  .gh-cta:hover { transform: translateY(-1px); box-shadow: 0 6px 16px rgba(0,0,0,.16); }
515
 
516
- /* Three-column compact grid and spacing optimization */
517
  .config-grid { gap: 16px; flex-wrap: wrap; }
518
  .config-card { background:#fff; border:1px solid #e5e7eb; border-radius:10px; padding:16px; flex: 1 1 calc(50% - 8px); min-width: 300px; }
519
  .config-card h3 { margin:0 0 12px; font-size:16px; border-bottom:1px solid #f1f5f9; padding-bottom:8px; }
520
  .config-card .gradio-checkbox, .config-card .gradio-radio, .config-card .gradio-textbox { margin-bottom:10px; }
521
 
522
- /* Unified content width container (for various Tabs) */
523
  .content-wrapper { max-width: 1500px; margin: 0 auto; width: 100%; overflow-x: auto; }
524
 
525
- /* Table width constraints, use stronger selectors to prevent container widening */
526
  .fixed-width-table,
527
  .fixed-width-table > div,
528
  .fixed-width-table .table-wrap,
@@ -544,7 +583,7 @@ def create_gradio_interface():
544
  max-width: none !important; /* Remove max-width to allow content to dictate width */
545
  }
546
 
547
- /* Column width allocation */
548
  .fixed-width-table th:nth-child(1),
549
  .fixed-width-table td:nth-child(1),
550
  .content-wrapper .gradio-dataframe th:nth-child(1),
@@ -609,7 +648,7 @@ def create_gradio_interface():
609
  vertical-align: middle !important;
610
  }
611
 
612
- /* Table header style optimization */
613
  .fixed-width-table th,
614
  .content-wrapper .gradio-dataframe th {
615
  background-color: #f8fafc !important;
@@ -619,7 +658,7 @@ def create_gradio_interface():
619
  text-align: center !important;
620
  }
621
 
622
- /* Table row style optimization */
623
  .fixed-width-table tbody tr:nth-child(even),
624
  .content-wrapper .gradio-dataframe tbody tr:nth-child(even) {
625
  background-color: #f9fafb !important;
@@ -631,7 +670,7 @@ def create_gradio_interface():
631
  transition: background-color 0.2s ease !important;
632
  }
633
 
634
- /* Table border optimization */
635
  .fixed-width-table table,
636
  .content-wrapper .gradio-dataframe table {
637
  border-collapse: collapse !important;
@@ -655,152 +694,151 @@ def create_gradio_interface():
655
  with gr.Blocks(title="WebQA Agent", theme=gr.themes.Soft(), css=custom_css) as app:
656
  with gr.Row(elem_id="app-wrapper"):
657
  with gr.Column(scale=8):
658
- gr.Markdown("# ๐Ÿค– WebQA Agent")
659
- gr.Markdown("## ๅ…จ่‡ชๅŠจ็ฝ‘้กต่ฏ„ไผฐๆต‹่ฏ• Agent๏ผŒไธ€้”ฎ่ฏŠๆ–ญๅŠŸ่ƒฝไธŽไบคไบ’ไฝ“้ชŒ")
660
- gr.Markdown("้…็ฝฎๅ‚ๆ•ฐๅนถ่ฟ่กŒ็ฝ‘็ซ™่ดจ้‡ๆฃ€ๆต‹ๆต‹่ฏ•ใ€‚็ณป็ปŸๆ”ฏๆŒๆŽ’้˜Ÿๆœบๅˆถ๏ผŒ็กฎไฟ็จณๅฎš่ฟ่กŒใ€‚")
661
  with gr.Column(scale=2):
662
- gr.HTML("<div class='gh-cta-wrap'><a class='gh-cta' href='https://github.com/MigoXLab/webqa-agent' target='_blank' rel='noopener'>🌟 Star us on GitHub</a></div>")
663
 
664
  with gr.Tabs():
665
- # Configuration tab
666
- with gr.TabItem("📝 Test Configuration"):
667
- # Two-column layout: left (target config + LLM config stacked), right (test types)
668
  with gr.Row(elem_classes=["config-grid"]):
669
  with gr.Column(elem_classes=["config-card"], min_width=300, scale=0):
670
- gr.Markdown("### ๐ŸŽฏ ็›ฎๆ ‡้…็ฝฎ")
671
  url = gr.Textbox(
672
- label="็›ฎๆ ‡URL",
673
- placeholder="https://example.com",
674
  value="https://demo.chat-sdk.dev/",
675
- info="่ฆๆต‹่ฏ•็š„็ฝ‘็ซ™URL"
676
  )
677
 
678
- gr.Markdown("### ๐Ÿค– LLM้…็ฝฎ")
679
  model = gr.Textbox(
680
- label="ๆจกๅž‹ๅ็งฐ",
681
  value="gpt-4.1-mini",
682
- info="ไฝฟ็”จ็š„่ฏญ่จ€ๆจกๅž‹ (OPENAI SDK ๅ…ผๅฎนๆ ผๅผ)"
683
  )
684
  api_key = gr.Textbox(
685
- label="API Key",
686
  value="",
687
- info="LLMๆœๅŠก็š„APIๅฏ†้’ฅ",
688
  type="password"
689
  )
690
  base_url = gr.Textbox(
691
- label="Base URL",
692
  value="",
693
- info="LLMๆœๅŠก็š„ๅŸบ็ก€URL"
694
  )
695
 
696
  with gr.Column(elem_classes=["config-card"], min_width=300, scale=0):
697
- gr.Markdown("### ๐Ÿงช ๆต‹่ฏ•็ฑปๅž‹")
698
- function_test_enabled = gr.Checkbox(label="ๅŠŸ่ƒฝๆต‹่ฏ•", value=True)
699
 
700
  with gr.Group(visible=True) as function_test_group:
701
  function_test_type = gr.Radio(
702
- label="ๅŠŸ่ƒฝๆต‹่ฏ•็ฑปๅž‹",
703
  choices=["default", "ai"],
704
  value="ai",
705
- info="default: ้ๅކๆต‹่ฏ• | ai: ๆ™บ่ƒฝๆต‹่ฏ•"
706
  )
707
  business_objectives = gr.Textbox(
708
- label="ๅŠŸ่ƒฝๆต‹่ฏ•ไธšๅŠก็›ฎๆ ‡",
709
- placeholder="ๆต‹่ฏ•ๅฏน่ฏๅŠŸ่ƒฝ๏ผŒ็”Ÿๆˆ2ไธช็”จไพ‹",
710
- # value="็”Ÿๆˆไธคไธชๆต‹่ฏ•็”จไพ‹",
711
- info="ai: ๆ™บ่ƒฝๆต‹่ฏ•็š„ๅ…ทไฝ“็›ฎๆ ‡๏ผŒๅฏไปฅไฟฎๆ”นไปฅๅฎšไน‰ไธๅŒ็š„ๆต‹่ฏ•ๅœบๆ™ฏ"
712
  )
713
 
714
- ux_test_enabled = gr.Checkbox(label="UX test", value=False)
715
  performance_test_enabled = gr.Checkbox(
716
- label="ๆ€ง่ƒฝๆต‹่ฏ•",
717
  value=False,
718
  interactive=False,
719
- info="็›ฎๅ‰ๅœจ ModelScope ็‰ˆๆœฌไธๅฏ็”จ๏ผ›่ฏทๅ‰ๅพ€ GitHub ไฝ“้ชŒ"
720
  )
721
  security_test_enabled = gr.Checkbox(
722
- label="ๅฎ‰ๅ…จๆต‹่ฏ•",
723
  value=False,
724
  interactive=False,
725
- info="็›ฎๅ‰ๅœจ ModelScope ็‰ˆๆœฌไธๅฏ็”จ๏ผ›่ฏทๅ‰ๅพ€ GitHub ไฝ“้ชŒ"
726
  )
727
 
728
  with gr.Row():
729
- submit_btn = gr.Button("🚀 Submit Test", variant="primary", size="lg")
730
 
731
- # Result display
732
- with gr.Accordion("📄 Task Submission Result", open=False) as submit_result_accordion:
733
  submit_status = gr.Textbox(
734
- label="ๆไบค็Šถๆ€",
735
  interactive=False,
736
  lines=5,
737
  show_label=False
738
  )
739
  task_id_output = gr.Textbox(
740
- label="ไปปๅŠกID",
741
  interactive=False,
742
  visible=False
743
  )
744
 
745
- # Status query tab
746
- with gr.TabItem("📊 Task Status"):
747
  with gr.Column(elem_classes=["task-status-container"]):
748
- gr.Markdown("### ๆŸฅ่ฏขไปปๅŠกๆ‰ง่กŒ็Šถๆ€")
749
  with gr.Row(variant="compact"):
750
  with gr.Column(min_width=300):
751
  task_id_input = gr.Textbox(
752
- label="ไปปๅŠกID",
753
- placeholder="่พ“ๅ…ฅไปปๅŠกIDๆŸฅ่ฏข็Šถๆ€",
754
- info="ไปŽๆต‹่ฏ•้…็ฝฎ้กต้ข่Žทๅ–็š„ไปปๅŠกID"
755
  )
756
  with gr.Column(min_width=100):
757
- check_btn = gr.Button("🔍 Check Status", variant="secondary", size="lg")
758
 
759
  task_status_output = gr.Textbox(
760
- label="ไปปๅŠก็Šถๆ€",
761
  interactive=False,
762
  lines=5
763
  )
764
 
765
- # HTML report display + download (button above the preview)
766
- gr.Markdown("### 📋 Test Report")
767
  download_file = gr.File(
768
- label="HTMLๆŠฅๅ‘Š",
769
  interactive=False,
770
  visible=False,
771
  file_types=[".html"],
772
  )
773
  html_output = gr.HTML(
774
- label="HTMLๆŠฅๅ‘Š",
775
  visible=True,
776
  elem_id="html-report",
777
  show_label=False,
778
- value="<div style='text-align: center; padding: 50px; color: #888;'>๐Ÿ“„ ่ฏทๅ…ˆๆŸฅ่ฏขไปปๅŠก็Šถๆ€๏ผŒๆˆๅŠŸๅŽๅฐ†ๅœจๆญคๆ˜พ็คบๆต‹่ฏ•ๆŠฅๅ‘Š</div>"
779
  )
780
 
781
- # History records
782
- with gr.TabItem("🗂️ Submission History") as history_tab:
783
  with gr.Column(elem_classes=["content-wrapper"]):
784
- gr.Markdown("### ๆไบค่ฎฐๅฝ•")
785
  history_table = gr.Dataframe(
786
- headers=["Submitted at", "Task ID", "URL", "Function test", "Type", "UX test"],
787
  row_count=(0, "dynamic"),
788
  interactive=False,
789
  elem_classes=["fixed-width-table"]
790
  )
791
- refresh_history_btn = gr.Button("🔄 Refresh History", variant="secondary", size="lg")
792
 
793
 
794
- # Event bindings
795
  def submit_and_expand(*args):
796
- """ๆไบคไปปๅŠกๅนถๅฑ•ๅผ€็ป“ๆžœ"""
797
- status_msg, task_id, success = submit_test(*args)
798
  if success:
799
  return status_msg, task_id, gr.Accordion(open=True)
800
  else:
801
  return status_msg, task_id, gr.Accordion(open=True)
802
 
803
- # Auto expand results and refresh the history table once after submission
804
  submit_btn.click(
805
  fn=submit_and_expand,
806
  inputs=[
@@ -808,25 +846,24 @@ def create_gradio_interface():
808
  function_test_enabled, function_test_type, business_objectives,
809
  ux_test_enabled, performance_test_enabled, security_test_enabled,
810
  api_key, base_url, model
811
- # viewport_width, viewport_height, headless, language
812
  ],
813
  outputs=[submit_status, task_id_output, submit_result_accordion]
814
  )
815
 
816
  submit_btn.click(
817
- fn=lambda: get_history_rows(),
818
  inputs=[],
819
  outputs=[history_table]
820
  )
821
 
822
  check_btn.click(
823
- fn=check_task_status,
824
  inputs=[task_id_input],
825
  outputs=[task_status_output, html_output, download_file]
826
  )
827
 
828
- # Refresh history records
829
- def get_history_rows():
830
  rows = []
831
  for item in reversed(submission_history[-100:]):
832
  rows.append([
@@ -839,23 +876,23 @@ def create_gradio_interface():
839
  ])
840
  return rows
841
 
842
- # Bind the refresh button in the "Submission History" tab
843
  refresh_history_btn.click(
844
- fn=lambda: get_history_rows(),
845
  inputs=[],
846
  outputs=[history_table]
847
  )
848
 
849
- # Bind the "Submission History" tab selection event to auto-refresh the history records
850
  history_tab.select(
851
- fn=lambda: get_history_rows(),
852
  inputs=[],
853
  outputs=[history_table]
854
  )
855
 
856
- # Clear the report display when the input changes
857
  task_id_input.change(
858
- fn=lambda x: ("", "<div style='text-align: center; padding: 50px; color: #888;'>📄 Please click the Check Status button to get the latest status</div>"),
859
  inputs=[task_id_input],
860
  outputs=[task_status_output, html_output]
861
  )
@@ -864,11 +901,11 @@ def create_gradio_interface():
864
 
865
 
866
  if __name__ == "__main__":
867
- # Start queue processing
868
  import threading
869
 
870
  def run_queue_processor():
871
- """ๅœจๅŽๅฐ็บฟ็จ‹ไธญ่ฟ่กŒ้˜Ÿๅˆ—ๅค„็†ๅ™จ"""
872
  loop = asyncio.new_event_loop()
873
  asyncio.set_event_loop(loop)
874
  loop.run_until_complete(process_queue())
@@ -876,7 +913,7 @@ if __name__ == "__main__":
876
  queue_thread = threading.Thread(target=run_queue_processor, daemon=True)
877
  queue_thread.start()
878
 
879
- # Create and launch Gradio application
880
  app = create_gradio_interface()
881
  app.launch(
882
  server_name="0.0.0.0",
 
17
  import gradio as gr
18
  import yaml
19
 
20
+ # Import project modules
21
  from webqa_agent.executor import ParallelMode
22
 
23
+ # Simple submission history (in-memory storage for current session only)
24
  submission_history: list = []
25
 
26
+ # Load i18n data
27
+ def load_i18n() -> Dict[str, Dict]:
28
+ """Load internationalization data from JSON file"""
29
+ i18n_path = Path(__file__).parent / "gradio_i18n.json"
30
+ try:
31
+ with open(i18n_path, 'r', encoding='utf-8') as f:
32
+ return json.load(f)
33
+ except Exception as e:
34
+ print(f"Failed to load i18n file: {e}")
35
+ return {"zh-CN": {}, "en-US": {}}
36
+
37
+ I18N_DATA = load_i18n()
38
+
39
+ def get_text(lang: str, key: str, **kwargs):
40
+ """Get localized text by key"""
41
+ keys = key.split('.')
42
+ data = I18N_DATA.get(lang, I18N_DATA.get("zh-CN", {}))
43
+
44
+ for k in keys:
45
+ if isinstance(data, dict) and k in data:
46
+ data = data[k]
47
+ else:
48
+ return key # Return key if not found
49
+
50
+ if isinstance(data, str):
51
+ # Support simple string formatting
52
+ try:
53
+ return data.format(**kwargs)
54
+ except (KeyError, ValueError):
55
+ return data
56
+ elif isinstance(data, list):
57
+ # Return list as-is for components that expect lists
58
+ return data
59
+ return key
60
+
61
 
62
  class QueueManager:
63
+ """Task queue manager to ensure only one task executes at a time"""
64
 
65
  def __init__(self):
66
  self.current_task: Optional[str] = None
 
69
  self.lock = Lock()
70
 
71
  def add_task(self, task_id: str, user_info: Dict) -> int:
72
+ """Add task to queue, return queue position"""
73
  with self.lock:
74
  self.task_status[task_id] = {
75
  "status": "queued",
 
82
  return self.task_queue.qsize()
83
 
84
  def get_next_task(self) -> Optional[str]:
85
+ """Get next task to execute"""
86
  with self.lock:
87
  if self.current_task is None and not self.task_queue.empty():
88
  task_id = self.task_queue.get()
 
93
  return None
94
 
95
  def complete_task(self, task_id: str, result: Any = None, error: Any = None):
96
+ """Mark task as completed"""
97
  with self.lock:
98
  if task_id in self.task_status:
99
  self.task_status[task_id]["status"] = "completed" if result else "failed"
 
104
  self.current_task = None
105
 
106
  def get_queue_position(self, task_id: str) -> int:
107
+ """Get task position in queue"""
108
  with self.lock:
109
  if task_id == self.current_task:
110
+ return 0 # Currently executing
111
 
112
  queue_list = list(self.task_queue.queue)
113
  try:
114
  return queue_list.index(task_id) + 1
115
  except ValueError:
116
+ return -1 # Task not in queue
117
 
118
  def get_task_status(self, task_id: str) -> Dict:
119
+ """Get task status"""
120
  with self.lock:
121
  return self.task_status.get(task_id, {"status": "not_found"})
122
 
123
 
124
+ # Global queue manager
125
  queue_manager = QueueManager()
126
 
127
 
128
+ def validate_llm_config(api_key: str, base_url: str, model: str, lang: str = "zh-CN") -> Tuple[bool, str]:
129
+ """Validate LLM configuration"""
130
  if not api_key.strip():
131
+ return False, get_text(lang, "messages.error_api_key_empty")
132
 
133
  if not base_url.strip():
134
+ return False, get_text(lang, "messages.error_base_url_empty")
135
 
136
  if not model.strip():
137
+ return False, get_text(lang, "messages.error_model_empty")
138
 
139
+ # Simple URL format check
140
  if not (base_url.startswith("http://") or base_url.startswith("https://")):
141
+ return False, get_text(lang, "messages.error_base_url_format")
142
 
143
+ return True, get_text(lang, "messages.config_valid")
144
 
145
 
146
  def create_config_dict(
147
  url: str,
 
 
148
  function_test_enabled: bool,
149
  function_test_type: str,
150
  business_objectives: str,
 
153
  security_test_enabled: bool,
154
  api_key: str,
155
  base_url: str,
156
+ model: str,
157
+ report_language: str = "zh-CN"
 
 
 
158
  ) -> Dict[str, Any]:
159
+ """Create configuration dictionary"""
160
  config = {
161
  "target": {
162
  "url": url,
163
  "description": ""
 
164
  },
165
  "test_config": {
166
  "function_test": {
 
184
  "base_url": base_url,
185
  "temperature": 0.1
186
  },
187
+ "report": {
188
+ "language": report_language
189
+ },
190
  "browser_config": {
191
  "viewport": {"width": 1280, "height": 720},
192
  "headless": True,
 
199
 
200
 
201
  def build_test_configurations(config: Dict[str, Any]) -> list:
202
+ """Build test configuration list based on config"""
203
  tests = []
204
  tconf = config.get("test_config", {})
205
 
206
  base_browser = {
207
  "viewport": config.get("browser_config", {}).get("viewport", {"width": 1280, "height": 720}),
208
+ "headless": True, # Force headless for web interface
209
  }
210
 
211
  # function test
 
213
  if tconf["function_test"].get("type") == "ai":
214
  tests.append({
215
  "test_type": "ui_agent_langgraph",
 
216
  "enabled": True,
217
  "browser_config": base_browser,
218
  "test_specific_config": {
 
223
  else:
224
  tests += [
225
  {
226
+ "test_type": "basic_test",
 
 
 
 
 
 
 
 
227
  "enabled": True,
228
  "browser_config": base_browser,
229
  "test_specific_config": {},
 
234
  if tconf.get("ux_test", {}).get("enabled"):
235
  tests.append({
236
  "test_type": "ux_test",
 
237
  "enabled": True,
238
  "browser_config": base_browser,
239
  "test_specific_config": {},
 
243
  if tconf.get("performance_test", {}).get("enabled"):
244
  tests.append({
245
  "test_type": "performance",
 
246
  "enabled": True,
247
  "browser_config": base_browser,
248
  "test_specific_config": {},
 
252
  if tconf.get("security_test", {}).get("enabled"):
253
  tests.append({
254
  "test_type": "security",
 
255
  "enabled": True,
256
  "browser_config": base_browser,
257
  "test_specific_config": {},
 
260
  return tests
261
 
262
 
263
+ async def run_webqa_test(config: Dict[str, Any], lang: str = "zh-CN") -> Tuple[Optional[str], Optional[str], Optional[str]]:
264
+ """Run WebQA test"""
265
  try:
266
+ # Validate LLM configuration
267
  llm_config = {
268
  "api": "openai",
269
  "model": config["llm_config"]["model"],
 
272
  "temperature": config["llm_config"]["temperature"],
273
  }
274
 
275
+ # Build test configurations
276
  test_configurations = build_test_configurations(config)
277
 
278
  if not test_configurations:
279
+ return None, None, get_text(lang, "messages.no_test_types_enabled")
280
 
281
  target_url = config["target"]["url"]
 
282
  max_concurrent_tests = 1
283
+
284
+ # Execute tests
285
  parallel_mode = ParallelMode([], max_concurrent_tests=max_concurrent_tests)
286
  results, report_path, html_report_path, result_count = await parallel_mode.run(
287
  url=target_url,
288
  llm_config=llm_config,
289
  test_configurations=test_configurations,
290
+ log_cfg=config.get("log", {"level": "info"}),
291
+ report_cfg=config.get("report", {"language": lang})
292
  )
293
 
294
  return html_report_path, report_path, None
295
 
296
  except Exception as e:
297
+ error_msg = f"{get_text(lang, 'messages.test_execution_failed')}: {str(e)}\n{traceback.format_exc()}"
298
  return None, None, error_msg
299
 
300
 
301
  def submit_test(
302
  url: str,
 
 
303
  function_test_enabled: bool,
304
  function_test_type: str,
305
  business_objectives: str,
 
308
  security_test_enabled: bool,
309
  api_key: str,
310
  base_url: str,
311
+ model: str,
312
+ interface_language: str = "zh-CN"
 
 
 
313
  ) -> Tuple[str, str, bool]:
314
+ """Submit test task, return (status message, task ID, success flag)"""
315
 
316
+ # Basic validation
317
  if not url.strip():
318
+ return get_text(interface_language, "messages.error_empty_url"), "", False
319
 
320
+ # Validate at least one test is enabled
321
  if not any([function_test_enabled, ux_test_enabled, performance_test_enabled, security_test_enabled]):
322
+ return get_text(interface_language, "messages.error_no_tests"), "", False
323
 
324
+ # If function test is enabled but no business objectives set
325
  if function_test_enabled and function_test_type == "ai" and not business_objectives.strip():
326
+ return get_text(interface_language, "messages.error_no_business_objectives"), "", False
327
 
328
+ # Validate LLM configuration
329
+ valid, msg = validate_llm_config(api_key, base_url, model, interface_language)
330
  if not valid:
331
+ return f"โŒ {get_text(interface_language, 'messages.error')}: {msg}", "", False
332
 
333
+ # Create configuration
334
  config = create_config_dict(
335
  url,
336
  function_test_enabled, function_test_type, business_objectives,
337
  ux_test_enabled, performance_test_enabled, security_test_enabled,
338
+ api_key, base_url, model,
339
+ report_language=interface_language
340
  )
341
 
342
+ # Generate task ID
343
  task_id = str(uuid.uuid4())
344
 
345
+ # Add to queue
346
+ user_info = {"config": config, "submitted_at": datetime.now(), "interface_language": interface_language}
347
  position = queue_manager.add_task(task_id, user_info)
348
 
349
+ status_msg = f"{get_text(interface_language, 'messages.task_submitted')}\n{get_text(interface_language, 'messages.task_id_label')}: {task_id}\n{get_text(interface_language, 'messages.queue_position')}: {position}"
350
  if position > 1:
351
+ status_msg += f"\n{get_text(interface_language, 'messages.queue_waiting', count=position-1)}"
352
 
353
+ # Record submission history
354
  submission_history.append({
355
  "task_id": task_id,
356
  "url": url,
 
365
  return status_msg, task_id, True
366
 
367
 
368
+ def check_task_status(task_id: str, interface_language: str = "zh-CN") -> Tuple[str, str, Any]:
369
+ """Check task status"""
370
  if not task_id.strip():
371
  return (
372
+ get_text(interface_language, "status.task_id_placeholder"),
373
+ f"<div style='text-align: center; padding: 50px; color: #888;'>{get_text(interface_language, 'status.default_message')}</div>",
374
  gr.update(visible=False, value=None),
375
  )
376
 
 
378
 
379
  if status["status"] == "not_found":
380
  return (
381
+ get_text(interface_language, "messages.task_not_found"),
382
+ f"<div style='text-align: center; padding: 50px; color: #ff6b6b;'>{get_text(interface_language, 'messages.task_not_found_message')}</div>",
383
  gr.update(visible=False, value=None),
384
  )
385
 
386
  if status["status"] == "queued":
387
  position = queue_manager.get_queue_position(task_id)
388
  return (
389
+ get_text(interface_language, "messages.task_queued", position=position),
390
+ f"<div style='text-align: center; padding: 50px; color: #ffa500;'>{get_text(interface_language, 'messages.task_queued_message')}</div>",
391
  gr.update(visible=False, value=None),
392
  )
393
 
394
  if status["status"] == "running":
395
  return (
396
+ get_text(interface_language, "messages.task_running"),
397
+ f"<div style='text-align: center; padding: 50px; color: #4dabf7;'>{get_text(interface_language, 'messages.task_running_message')}</div>",
398
  gr.update(visible=False, value=None),
399
  )
400
 
401
  if status["status"] == "completed":
402
  result = status.get("result")
403
+ if result and result[0]: # html_report_path exists
404
+ # Read HTML report content
405
  try:
406
  with open(result[0], 'r', encoding='utf-8') as f:
407
  html_content = f.read()
408
+ # Wrap report in iframe to isolate its styles and avoid affecting external layout
409
+ # Inline rendering, remove inner scrolling and horizontal scrolling
410
  content = html_content
411
  m = re.search(r"<head[^>]*>", content, flags=re.I)
412
  inject_style = (
 
426
  f"srcdoc=\"{escaped}\"></iframe>"
427
  )
428
  return (
429
+ f"{get_text(interface_language, 'messages.task_completed')}\n{get_text(interface_language, 'messages.report_path')}: {result[0]}",
430
  iframe_html,
431
  gr.update(visible=True, value=result[0]),
432
  )
433
  except Exception as e:
434
  return (
435
+ f"{get_text(interface_language, 'messages.task_completed')}, but failed to read report: {str(e)}\n{get_text(interface_language, 'messages.report_path')}: {result[0]}",
436
+ f"<div style='text-align: center; padding: 50px; color: #ff6b6b;'><p>โŒ Unable to read HTML report file</p><p>{get_text(interface_language, 'messages.report_path')}๏ผš{result[0]}</p><p>{get_text(interface_language, 'messages.error_info', error=str(e))}</p></div>",
437
  gr.update(visible=True, value=result[0]),
438
  )
439
  else:
440
  return (
441
+ get_text(interface_language, "messages.task_completed_no_report"),
442
+ f"<div style='text-align: center; padding: 50px; color: #ffa500;'>{get_text(interface_language, 'messages.task_completed_no_report_message')}</div>",
443
  gr.update(visible=False, value=None),
444
  )
445
 
446
  if status["status"] == "failed":
447
+ error = status.get("error", "Unknown error")
448
  return (
449
+ get_text(interface_language, "messages.task_failed", error=error),
450
+ f"<div style='text-align: center; padding: 50px; color: #ff6b6b;'><p>{get_text(interface_language, 'messages.task_failed_message')}</p><p>{get_text(interface_language, 'messages.error_info', error=error)}</p></div>",
451
  gr.update(visible=False, value=None),
452
  )
453
 
454
  return (
455
+ get_text(interface_language, "messages.unknown_status"),
456
+ f"<div style='text-align: center; padding: 50px; color: #888;'>{get_text(interface_language, 'messages.unknown_status')}</div>",
457
  gr.update(visible=False, value=None),
458
  )
459
 
460
 
461
  async def process_queue():
462
+ """Process tasks in queue"""
463
  while True:
464
  task_id = queue_manager.get_next_task()
465
  if task_id:
466
  try:
467
  task_status = queue_manager.get_task_status(task_id)
468
  config = task_status["user_info"]["config"]
469
+ interface_language = task_status["user_info"].get("interface_language", "zh-CN")
470
 
471
+ # Execute test
472
+ html_report_path, report_path, error = await run_webqa_test(config, interface_language)
473
 
474
  if error:
475
  queue_manager.complete_task(task_id, error=error)
 
479
  except Exception as e:
480
  queue_manager.complete_task(task_id, error=str(e))
481
 
482
+ await asyncio.sleep(1) # Avoid busy waiting
483
 
484
 
485
+ def create_gradio_interface(language: str = "zh-CN"):
486
+ """Create Gradio interface with specified language"""
487
 
488
+ # Custom CSS styles
489
  custom_css = """
490
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
491
+
492
+ /* Global font settings for better English typography */
493
+ * {
494
+ font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif;
495
+ -webkit-font-smoothing: antialiased;
496
+ -moz-osx-font-smoothing: grayscale;
497
+ }
498
+
499
+ /* Specific font for headers and titles */
500
+ h1, h2, h3, h4, h5, h6 {
501
+ font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', sans-serif;
502
+ font-weight: 600;
503
+ letter-spacing: -0.025em;
504
+ }
505
+
506
+ /* Button and input font improvements */
507
+ button, input, textarea, select {
508
+ font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', sans-serif;
509
+ font-weight: 400;
510
+ }
511
+
512
  #html-report { border: 1px solid #e1e5e9; border-radius: 8px; padding: 0; background: #fff; }
513
  #html-report iframe { width: 100%; height: 1800px; border: none; overflow: hidden; }
514
 
515
  .gradio-container { max-width: 1500px !important; margin: 0 auto !important; width: 100% !important; }
516
 
517
+ /* Prevent layout shrinking */
518
  .tab-nav {
519
  position: sticky;
520
  top: 0;
521
  z-index: 100;
522
  }
523
 
524
+ /* Improve form layout */
525
  .form-group {
526
  margin-bottom: 1rem;
527
  }
528
 
529
+ /* Ensure task status area doesn't shrink */
530
  .task-status-container {
531
  min-height: 400px;
532
  }
533
 
534
+ /* Remove password field hint styles */
535
  input[type="password"] {
536
  background-color: #fff !important;
537
  }
538
 
539
+ /* Top GitHub CTA button */
540
  .gh-cta-wrap { text-align: right; padding-top: 16px; }
541
  .gh-cta {
542
  display: inline-block;
543
  padding: 10px 16px;
544
  border-radius: 8px;
545
+ background: linear-gradient(90deg,#2563eb,#7c3aed); /* Blue-purple gradient, more eye-catching */
546
  color: #fff !important;
547
  text-decoration: none !important;
548
  font-weight: 600;
 
552
  }
553
  .gh-cta:hover { transform: translateY(-1px); box-shadow: 0 6px 16px rgba(0,0,0,.16); }
554
 
555
+ /* Three-column compact grid and spacing optimization */
556
  .config-grid { gap: 16px; flex-wrap: wrap; }
557
  .config-card { background:#fff; border:1px solid #e5e7eb; border-radius:10px; padding:16px; flex: 1 1 calc(50% - 8px); min-width: 300px; }
558
  .config-card h3 { margin:0 0 12px; font-size:16px; border-bottom:1px solid #f1f5f9; padding-bottom:8px; }
559
  .config-card .gradio-checkbox, .config-card .gradio-radio, .config-card .gradio-textbox { margin-bottom:10px; }
560
 
561
+ /* Unified content width container (for various Tabs) */
562
  .content-wrapper { max-width: 1500px; margin: 0 auto; width: 100%; overflow-x: auto; }
563
 
564
+ /* Table width constraints, use stronger selectors to prevent container widening */
565
  .fixed-width-table,
566
  .fixed-width-table > div,
567
  .fixed-width-table .table-wrap,
 
583
  max-width: none !important; /* Remove max-width to allow content to dictate width */
584
  }
585
 
586
+ /* Column width allocation */
587
  .fixed-width-table th:nth-child(1),
588
  .fixed-width-table td:nth-child(1),
589
  .content-wrapper .gradio-dataframe th:nth-child(1),
 
648
  vertical-align: middle !important;
649
  }
650
 
651
+ /* Table header style optimization */
652
  .fixed-width-table th,
653
  .content-wrapper .gradio-dataframe th {
654
  background-color: #f8fafc !important;
 
658
  text-align: center !important;
659
  }
660
 
661
+ /* Table row style optimization */
662
  .fixed-width-table tbody tr:nth-child(even),
663
  .content-wrapper .gradio-dataframe tbody tr:nth-child(even) {
664
  background-color: #f9fafb !important;
 
670
  transition: background-color 0.2s ease !important;
671
  }
672
 
673
+ /* Table border optimization */
674
  .fixed-width-table table,
675
  .content-wrapper .gradio-dataframe table {
676
  border-collapse: collapse !important;
 
694
  with gr.Blocks(title="WebQA Agent", theme=gr.themes.Soft(), css=custom_css) as app:
695
  with gr.Row(elem_id="app-wrapper"):
696
  with gr.Column(scale=8):
697
+ gr.Markdown(f"# {get_text(language, 'title')}")
698
+ gr.Markdown(f"## {get_text(language, 'subtitle')}")
699
+ gr.Markdown(get_text(language, "description"))
700
  with gr.Column(scale=2):
701
+ gr.HTML(f"<div class='gh-cta-wrap'><a class='gh-cta' href='https://github.com/MigoXLab/webqa-agent' target='_blank' rel='noopener'>{get_text(language, 'github_cta')}</a></div>")
702
 
703
  with gr.Tabs():
704
+ # Configuration tab
705
+ with gr.TabItem(get_text(language, "tabs.config")):
706
+ # Two-column layout: left (target config + LLM config stacked), right (test types)
707
  with gr.Row(elem_classes=["config-grid"]):
708
  with gr.Column(elem_classes=["config-card"], min_width=300, scale=0):
709
+ gr.Markdown(f"### {get_text(language, 'config.target_config')}")
710
  url = gr.Textbox(
711
+ label=get_text(language, "config.target_url"),
712
+ placeholder=get_text(language, "config.target_url_placeholder"),
713
  value="https://demo.chat-sdk.dev/",
714
+ info=get_text(language, "config.target_url_info")
715
  )
716
 
717
+ gr.Markdown(f"### {get_text(language, 'config.llm_config')}")
718
  model = gr.Textbox(
719
+ label=get_text(language, "config.model_name"),
720
  value="gpt-4.1-mini",
721
+ info=get_text(language, "config.model_name_info")
722
  )
723
  api_key = gr.Textbox(
724
+ label=get_text(language, "config.api_key"),
725
  value="",
726
+ info=get_text(language, "config.api_key_info"),
727
  type="password"
728
  )
729
  base_url = gr.Textbox(
730
+ label=get_text(language, "config.base_url"),
731
  value="",
732
+ info=get_text(language, "config.base_url_info")
733
  )
734
 
735
  with gr.Column(elem_classes=["config-card"], min_width=300, scale=0):
736
+ gr.Markdown(f"### {get_text(language, 'config.test_types')}")
737
+ function_test_enabled = gr.Checkbox(label=get_text(language, "config.function_test"), value=True)
738
 
739
  with gr.Group(visible=True) as function_test_group:
740
  function_test_type = gr.Radio(
741
+ label=get_text(language, "config.function_test_type"),
742
  choices=["default", "ai"],
743
  value="ai",
744
+ info=get_text(language, "config.function_test_type_info")
745
  )
746
  business_objectives = gr.Textbox(
747
+ label=get_text(language, "config.business_objectives"),
748
+ placeholder=get_text(language, "config.business_objectives_placeholder"),
749
+ info=get_text(language, "config.business_objectives_info")
 
750
  )
751
 
752
+ ux_test_enabled = gr.Checkbox(label=get_text(language, "config.ux_test"), value=False)
753
  performance_test_enabled = gr.Checkbox(
754
+ label=get_text(language, "config.performance_test"),
755
  value=False,
756
  interactive=False,
757
+ info=get_text(language, "config.performance_test_info")
758
  )
759
  security_test_enabled = gr.Checkbox(
760
+ label=get_text(language, "config.security_test"),
761
  value=False,
762
  interactive=False,
763
+ info=get_text(language, "config.security_test_info")
764
  )
765
 
766
  with gr.Row():
767
+ submit_btn = gr.Button(get_text(language, "config.submit_btn"), variant="primary", size="lg")
768
 
769
+ # Result display
770
+ with gr.Accordion(get_text(language, "config.submit_result"), open=False) as submit_result_accordion:
771
  submit_status = gr.Textbox(
772
+ label=get_text(language, "status.task_status"),
773
  interactive=False,
774
  lines=5,
775
  show_label=False
776
  )
777
  task_id_output = gr.Textbox(
778
+ label=get_text(language, "status.task_id"),
779
  interactive=False,
780
  visible=False
781
  )
782
 
783
+ # Status query tab
784
+ with gr.TabItem(get_text(language, "tabs.status")):
785
  with gr.Column(elem_classes=["task-status-container"]):
786
+ gr.Markdown(f"### {get_text(language, 'status.query_title')}")
787
  with gr.Row(variant="compact"):
788
  with gr.Column(min_width=300):
789
  task_id_input = gr.Textbox(
790
+ label=get_text(language, "status.task_id"),
791
+ placeholder=get_text(language, "status.task_id_placeholder"),
792
+ info=get_text(language, "status.task_id_info")
793
  )
794
  with gr.Column(min_width=100):
795
+ check_btn = gr.Button(get_text(language, "status.check_btn"), variant="secondary", size="lg")
796
 
797
  task_status_output = gr.Textbox(
798
+ label=get_text(language, "status.task_status"),
799
  interactive=False,
800
  lines=5
801
  )
802
 
803
+ # HTML report display + download (button above preview)
804
+ gr.Markdown(f"### {get_text(language, 'status.test_report')}")
805
  download_file = gr.File(
806
+ label=get_text(language, "status.html_report"),
807
  interactive=False,
808
  visible=False,
809
  file_types=[".html"],
810
  )
811
  html_output = gr.HTML(
812
+ label=get_text(language, "status.html_report"),
813
  visible=True,
814
  elem_id="html-report",
815
  show_label=False,
816
+ value=f"<div style='text-align: center; padding: 50px; color: #888;'>{get_text(language, 'status.default_message')}</div>"
817
  )
818
 
819
+ # History records
820
+ with gr.TabItem(get_text(language, "tabs.history")) as history_tab:
821
  with gr.Column(elem_classes=["content-wrapper"]):
822
+ gr.Markdown(f"### {get_text(language, 'history.title')}")
823
  history_table = gr.Dataframe(
824
+ headers=get_text(language, "history.headers"),
825
  row_count=(0, "dynamic"),
826
  interactive=False,
827
  elem_classes=["fixed-width-table"]
828
  )
829
+ refresh_history_btn = gr.Button(get_text(language, "history.refresh_btn"), variant="secondary", size="lg")
830
 
831
 
832
+ # Event bindings
833
  def submit_and_expand(*args):
834
+ """Submit task and expand results"""
835
+ status_msg, task_id, success = submit_test(*args, interface_language=language)
836
  if success:
837
  return status_msg, task_id, gr.Accordion(open=True)
838
  else:
839
  return status_msg, task_id, gr.Accordion(open=True)
840
 
841
+ # Auto expand results and refresh history once after submission
842
  submit_btn.click(
843
  fn=submit_and_expand,
844
  inputs=[
 
846
  function_test_enabled, function_test_type, business_objectives,
847
  ux_test_enabled, performance_test_enabled, security_test_enabled,
848
  api_key, base_url, model
 
849
  ],
850
  outputs=[submit_status, task_id_output, submit_result_accordion]
851
  )
852
 
853
  submit_btn.click(
854
+ fn=lambda: get_history_rows(language),
855
  inputs=[],
856
  outputs=[history_table]
857
  )
858
 
859
  check_btn.click(
860
+ fn=lambda task_id: check_task_status(task_id, language),
861
  inputs=[task_id_input],
862
  outputs=[task_status_output, html_output, download_file]
863
  )
864
 
865
+ # Refresh history records
866
+ def get_history_rows(lang):
867
  rows = []
868
  for item in reversed(submission_history[-100:]):
869
  rows.append([
 
876
  ])
877
  return rows
878
 
879
+ # Bind refresh button in "Submission History" Tab
880
  refresh_history_btn.click(
881
+ fn=lambda: get_history_rows(language),
882
  inputs=[],
883
  outputs=[history_table]
884
  )
885
 
886
+ # Bind "Submission History" Tab selection event, auto refresh history records
887
  history_tab.select(
888
+ fn=lambda: get_history_rows(language),
889
  inputs=[],
890
  outputs=[history_table]
891
  )
892
 
893
+ # Clear report display when input changes
894
  task_id_input.change(
895
+ fn=lambda x: ("", f"<div style='text-align: center; padding: 50px; color: #888;'>{get_text(language, 'status.input_change_message')}</div>"),
896
  inputs=[task_id_input],
897
  outputs=[task_status_output, html_output]
898
  )
 
901
 
902
 
903
  if __name__ == "__main__":
904
+ # Start queue processing
905
  import threading
906
 
907
  def run_queue_processor():
908
+ """Run queue processor in background thread"""
909
  loop = asyncio.new_event_loop()
910
  asyncio.set_event_loop(loop)
911
  loop.run_until_complete(process_queue())
 
913
  queue_thread = threading.Thread(target=run_queue_processor, daemon=True)
914
  queue_thread.start()
915
 
916
+ # Create and launch Gradio application
917
  app = create_gradio_interface()
918
  app.launch(
919
  server_name="0.0.0.0",
app_gradio/gradio_i18n.json ADDED
@@ -0,0 +1,180 @@
1
+ {
2
+ "zh-CN": {
3
+ "title": "๐Ÿค– WebQA Agent",
4
+ "subtitle": "ๅ…จ่‡ชๅŠจ็ฝ‘้กต่ฏ„ไผฐๆต‹่ฏ• Agent๏ผŒไธ€้”ฎ่ฏŠๆ–ญๅŠŸ่ƒฝไธŽไบคไบ’ไฝ“้ชŒ",
5
+ "description": "้…็ฝฎๅ‚ๆ•ฐๅนถ่ฟ่กŒ็ฝ‘็ซ™่ดจ้‡ๆฃ€ๆต‹ๆต‹่ฏ•ใ€‚็ณป็ปŸๆ”ฏๆŒๆŽ’้˜Ÿๆœบๅˆถ๏ผŒ็กฎไฟ็จณๅฎš่ฟ่กŒใ€‚",
6
+ "github_cta": "๐ŸŒŸ ๅœจ GitHub ไธŠไธบๆˆ‘ไปฌ Star",
7
+ "tabs": {
8
+ "config": "๐Ÿ“ ๆต‹่ฏ•้…็ฝฎ",
9
+ "status": "๐Ÿ“Š ไปปๅŠก็Šถๆ€",
10
+ "history": "๐Ÿ—‚๏ธ ๆไบคๅކๅฒ"
11
+ },
12
+ "config": {
13
+ "target_config": "๐ŸŽฏ ็›ฎๆ ‡้…็ฝฎ",
14
+ "target_url": "็›ฎๆ ‡URL",
15
+ "target_url_placeholder": "https://example.com",
16
+ "target_url_info": "่ฆๆต‹่ฏ•็š„็ฝ‘็ซ™URL",
17
+ "llm_config": "๐Ÿค– LLM้…็ฝฎ",
18
+ "model_name": "ๆจกๅž‹ๅ็งฐ",
19
+ "model_name_info": "ไฝฟ็”จ็š„่ฏญ่จ€ๆจกๅž‹ (OPENAI SDK ๅ…ผๅฎนๆ ผๅผ)",
20
+ "api_key": "API Key",
21
+ "api_key_info": "LLMๆœๅŠก็š„APIๅฏ†้’ฅ",
22
+ "base_url": "Base URL",
23
+ "base_url_info": "LLMๆœๅŠก็š„ๅŸบ็ก€URL",
24
+ "report_language": "๐ŸŒ ๆŠฅๅ‘Š่ฏญ่จ€",
25
+ "report_language_label": "้€‰ๆ‹ฉๆŠฅๅ‘Š่ฏญ่จ€",
26
+ "report_language_info": "้€‰ๆ‹ฉๆต‹่ฏ•ๆŠฅๅ‘Šๅ’Œๆ—ฅๅฟ—็š„ๆ˜พ็คบ่ฏญ่จ€",
27
+ "test_types": "๐Ÿงช ๆต‹่ฏ•็ฑปๅž‹",
28
+ "function_test": "ๅŠŸ่ƒฝๆต‹่ฏ•",
29
+ "function_test_type": "ๅŠŸ่ƒฝๆต‹่ฏ•็ฑปๅž‹",
30
+ "function_test_type_info": "default: ้ๅކๆต‹่ฏ•๏ผŒ่ฆ†็›–ๅฏ็‚นๅ‡ปๅ…ƒ็ด ๅ’Œๆ‰€ๆœ‰้“พๆŽฅ\n ai: ๅŸบไบŽ่ง†่ง‰ๆจกๅž‹็š„ๆ™บ่ƒฝๆต‹่ฏ•๏ผŒ่ƒฝๅคŸๆจกๆ‹Ÿ็œŸๅฎž็”จๆˆท่กŒไธบใ€็†่งฃไธšๅŠกไธŠไธ‹ๆ–‡๏ผŒ้ชŒ่ฏ็ฝ‘้กตๅŠŸ่ƒฝใ€‚",
31
+ "business_objectives": "AIๅŠŸ่ƒฝๆต‹่ฏ•ไธšๅŠก็›ฎๆ ‡",
32
+ "business_objectives_placeholder": "ๆต‹่ฏ•ๅฏน่ฏๅŠŸ่ƒฝ๏ผŒ็”Ÿๆˆ2ไธช็”จไพ‹",
33
+ "business_objectives_info": "ai: ๅฎšๅˆถไธๅŒๅœบๆ™ฏ๏ผŒ็ฒพๅ‡†ๅ‘็Žฐๅคๆ‚ๅŠŸ่ƒฝ้—ฎ้ข˜",
34
+ "ux_test": "็”จๆˆทไฝ“้ชŒๆต‹่ฏ•",
35
+ "performance_test": "ๆ€ง่ƒฝๆต‹่ฏ•",
36
+ "performance_test_info": "็›ฎๅ‰ๅœจ ModelScope ็‰ˆๆœฌไธๅฏ็”จ๏ผ›่ฏทๅ‰ๅพ€ GitHub ไฝ“้ชŒ",
37
+ "security_test": "ๅฎ‰ๅ…จๆต‹่ฏ•",
38
+ "security_test_info": "็›ฎๅ‰ๅœจ ModelScope ็‰ˆๆœฌไธๅฏ็”จ๏ผ›่ฏทๅ‰ๅพ€ GitHub ไฝ“้ชŒ",
39
+ "submit_btn": "๐Ÿš€ ๆไบคๆต‹่ฏ•",
40
+ "submit_btn_submitting": "๐Ÿš€ ๆไบคไธญ...",
41
+ "submit_result": "๐Ÿ“„ ไปปๅŠกๆไบค็ป“ๆžœ"
42
+ },
43
+ "status": {
44
+ "query_title": "ๆŸฅ่ฏขไปปๅŠกๆ‰ง่กŒ็Šถๆ€",
45
+ "task_id": "ไปปๅŠกID",
46
+ "task_id_placeholder": "่พ“ๅ…ฅไปปๅŠกIDๆŸฅ่ฏข็Šถๆ€",
47
+ "task_id_info": "ไปŽๆต‹่ฏ•้…็ฝฎ้กต้ข่Žทๅ–็š„ไปปๅŠกID",
48
+ "check_btn": "๐Ÿ” ๆŸฅ่ฏข็Šถๆ€",
49
+ "task_status": "ไปปๅŠก็Šถๆ€",
50
+ "test_report": "๐Ÿ“‹ ๆต‹่ฏ•ๆŠฅๅ‘Š",
51
+ "html_report": "HTMLๆŠฅๅ‘Š",
52
+ "default_message": "๐Ÿ“„ ่ฏทๅ…ˆๆŸฅ่ฏขไปปๅŠก็Šถๆ€๏ผŒๆˆๅŠŸๅŽๅฐ†ๅœจๆญคๆ˜พ็คบๆต‹่ฏ•ๆŠฅๅ‘Š",
53
+ "input_change_message": "๐Ÿ“„ ่ฏท็‚นๅ‡ปๆŸฅ่ฏข็Šถๆ€ๆŒ‰้’ฎ่Žทๅ–ๆœ€ๆ–ฐ็Šถๆ€"
54
+ },
55
+ "history": {
56
+ "title": "ๆไบค่ฎฐๅฝ•",
57
+ "headers": ["ๆไบคๆ—ถ้—ด", "ไปปๅŠกID", "URL", "ๅŠŸ่ƒฝๆต‹่ฏ•", "็ฑปๅž‹", "UXๆต‹่ฏ•"],
58
+ "refresh_btn": "๐Ÿ”„ ๅˆทๆ–ฐๅކๅฒ่ฎฐๅฝ•"
59
+ },
60
+ "messages": {
61
+ "error_empty_url": "โŒ ้”™่ฏฏ๏ผš็›ฎๆ ‡URLไธ่ƒฝไธบ็ฉบ",
62
+ "error_no_tests": "โŒ ้”™่ฏฏ๏ผš่‡ณๅฐ‘้œ€่ฆๅฏ็”จไธ€ไธชๆต‹่ฏ•็ฑปๅž‹",
63
+ "error_no_business_objectives": "โŒ ้”™่ฏฏ๏ผšAIๅŠŸ่ƒฝๆต‹่ฏ•้œ€่ฆ่ฎพ็ฝฎไธšๅŠก็›ฎๆ ‡",
64
+ "error_api_key_empty": "API Keyไธ่ƒฝไธบ็ฉบ",
65
+ "error_base_url_empty": "Base URLไธ่ƒฝไธบ็ฉบ",
66
+ "error_model_empty": "ๆจกๅž‹ๅ็งฐไธ่ƒฝไธบ็ฉบ",
67
+ "error_base_url_format": "Base URLๆ ผๅผไธๆญฃ็กฎ๏ผŒๅบ”ไปฅhttp://ๆˆ–https://ๅผ€ๅคด",
68
+ "config_valid": "้…็ฝฎ้ชŒ่ฏ้€š่ฟ‡",
69
+ "task_submitted": "โœ… ไปปๅŠกๅทฒๆไบค๏ผ",
70
+ "task_id_label": "ไปปๅŠกID",
71
+ "queue_position": "ๅฝ“ๅ‰้˜Ÿๅˆ—ไฝ็ฝฎ",
72
+ "queue_waiting": "โณ ่ฏท่€ๅฟƒ็ญ‰ๅพ…๏ผŒๅ‰้ข่ฟ˜ๆœ‰ {count} ไธชไปปๅŠกๅœจๆŽ’้˜Ÿ",
73
+ "task_not_found": "โŒ ไปปๅŠกไธๅญ˜ๅœจ",
74
+ "task_not_found_message": "โŒ ไปปๅŠกไธๅญ˜ๅœจ๏ผŒ่ฏทๆฃ€ๆŸฅไปปๅŠกIDๆ˜ฏๅฆๆญฃ็กฎ",
75
+ "task_queued": "โณ ไปปๅŠกๆŽ’้˜Ÿไธญ๏ผŒๅฝ“ๅ‰ไฝ็ฝฎ: {position}",
76
+ "task_queued_message": "โณ ไปปๅŠกๆญฃๅœจๆŽ’้˜Ÿไธญ๏ผŒ่ฏท็จๅŽๅ†ๆŸฅ่ฏข",
77
+ "task_running": "๐Ÿš€ ไปปๅŠกๆญฃๅœจๆ‰ง่กŒไธญ๏ผŒ่ฏท็จๅ€™...",
78
+ "task_running_message": "๐Ÿš€ ไปปๅŠกๆญฃๅœจๆ‰ง่กŒไธญ๏ผŒ่ฏท็จๅŽๅ†ๆŸฅ่ฏข็ป“ๆžœ",
79
+ "task_completed": "โœ… ไปปๅŠกๆ‰ง่กŒๅฎŒๆˆ๏ผ",
80
+ "report_path": "ๆŠฅๅ‘Š่ทฏๅพ„",
81
+ "task_completed_no_report": "โœ… ไปปๅŠกๆ‰ง่กŒๅฎŒๆˆ๏ผŒไฝ†ๆœช็”ŸๆˆHTMLๆŠฅๅ‘Š",
82
+ "task_completed_no_report_message": "โš ๏ธ ๆต‹่ฏ•ๆ‰ง่กŒๅฎŒๆˆ๏ผŒไฝ†ๆœช็”ŸๆˆHTMLๆŠฅๅ‘Š",
83
+ "task_failed": "โŒ ไปปๅŠกๆ‰ง่กŒๅคฑ่ดฅ: {error}",
84
+ "task_failed_message": "โŒ ไปปๅŠกๆ‰ง่กŒๅคฑ่ดฅ",
85
+ "error_info": "้”™่ฏฏไฟกๆฏ๏ผš{error}",
86
+ "unknown_status": "โ“ ๆœช็Ÿฅ็Šถๆ€",
87
+ "test_execution_failed": "ๆต‹่ฏ•ๆ‰ง่กŒๅคฑ่ดฅ",
88
+ "no_test_types_enabled": "้”™่ฏฏ๏ผšๆœชๅฏ็”จไปปไฝ•ๆต‹่ฏ•็ฑปๅž‹"
89
+ }
90
+ },
91
+ "en-US": {
92
+ "title": "๐Ÿค– WebQA Agent",
93
+ "subtitle": "Autonomous web browser agent that audits performance, functionality & UX for QA and vibe-coding creators.",
94
+ "description": "Configure parameters and run website quality detection tests. System supports queue mechanism for stable operation.",
95
+ "github_cta": "๐ŸŒŸ Star us on GitHub",
96
+ "tabs": {
97
+ "config": "๐Ÿ“ Test Configuration",
98
+ "status": "๐Ÿ“Š Task Status",
99
+ "history": "๐Ÿ—‚๏ธ Submission History"
100
+ },
101
+ "config": {
102
+ "target_config": "๐ŸŽฏ Target Configuration",
103
+ "target_url": "Target URL",
104
+ "target_url_placeholder": "https://example.com",
105
+ "target_url_info": "Website URL to test",
106
+ "llm_config": "๐Ÿค– LLM Configuration",
107
+ "model_name": "Model Name",
108
+ "model_name_info": "Language model to use (OPENAI SDK compatible format)",
109
+ "api_key": "API Key",
110
+ "api_key_info": "API key for LLM service",
111
+ "base_url": "Base URL",
112
+ "base_url_info": "Base URL for LLM service",
113
+ "report_language": "๐ŸŒ Report Language",
114
+ "report_language_label": "Select Report Language",
115
+ "report_language_info": "Choose display language for test reports and logs",
116
+ "test_types": "๐Ÿงช Test Types",
117
+ "function_test": "Function Test",
118
+ "function_test_type": "Function Test Type",
119
+ "function_test_type_info": "default: Traverse clickable elements & links.\n ai: Vision-model intelligent test simulating users & validating functionality.",
120
+ "business_objectives": "AI Function Test Business Objectives",
121
+ "business_objectives_placeholder": "Test chat functionality, generate 2 test cases",
122
+ "business_objectives_info": "ai: Customize different scenarios, accurately find complex functional issues",
123
+ "ux_test": "User Experience Test",
124
+ "performance_test": "Performance Test",
125
+ "performance_test_info": "Currently unavailable in HuggingFace version; please visit GitHub for experience",
126
+ "security_test": "Security Test",
127
+ "security_test_info": "Currently unavailable in Huggingface version; please visit GitHub for experience",
128
+ "submit_btn": "๐Ÿš€ Submit Test",
129
+ "submit_btn_submitting": "๐Ÿš€ Submitting...",
130
+ "submit_result": "๐Ÿ“„ Task Submission Result"
131
+ },
132
+ "status": {
133
+ "query_title": "Query Task Execution Status",
134
+ "task_id": "Task ID",
135
+ "task_id_placeholder": "Enter task ID to query status",
136
+ "task_id_info": "Task ID obtained from test configuration page",
137
+ "check_btn": "๐Ÿ” Check Status",
138
+ "task_status": "Task Status",
139
+ "test_report": "๐Ÿ“‹ Test Report",
140
+ "html_report": "HTML Report",
141
+ "default_message": "๐Ÿ“„ Please query task status first, report will be displayed here after success",
142
+ "input_change_message": "๐Ÿ“„ Please click the check status button to get the latest status"
143
+ },
144
+ "history": {
145
+ "title": "Submission Records",
146
+ "headers": ["Submit Time", "Task ID", "URL", "Function Test", "Type", "UX Test"],
147
+ "refresh_btn": "๐Ÿ”„ Refresh History"
148
+ },
149
+ "messages": {
150
+ "error_empty_url": "โŒ Error: Target URL cannot be empty",
151
+ "error_no_tests": "โŒ Error: At least one test type must be enabled",
152
+ "error_no_business_objectives": "โŒ Error: AI function test requires business objectives",
153
+ "error_api_key_empty": "API Key cannot be empty",
154
+ "error_base_url_empty": "Base URL cannot be empty",
155
+ "error_model_empty": "Model name cannot be empty",
156
+ "error_base_url_format": "Base URL format is incorrect, should start with http:// or https://",
157
+ "config_valid": "Configuration validation passed",
158
+ "task_submitted": "โœ… Task submitted!",
159
+ "task_id_label": "Task ID",
160
+ "queue_position": "Current queue position",
161
+ "queue_waiting": "โณ Please wait patiently, {count} tasks are still queuing ahead",
162
+ "task_not_found": "โŒ Task not found",
163
+ "task_not_found_message": "โŒ Task not found, please check if the task ID is correct",
164
+ "task_queued": "โณ Task queued, current position: {position}",
165
+ "task_queued_message": "โณ Task is queuing, please check again later",
166
+ "task_running": "๐Ÿš€ Task is running, please wait...",
167
+ "task_running_message": "๐Ÿš€ Task is running, please check results later",
168
+ "task_completed": "โœ… Task completed!",
169
+ "report_path": "Report path",
170
+ "task_completed_no_report": "โœ… Task completed but no HTML report generated",
171
+ "task_completed_no_report_message": "โš ๏ธ Test execution completed but no HTML report generated",
172
+ "task_failed": "โŒ Task execution failed: {error}",
173
+ "task_failed_message": "โŒ Task execution failed",
174
+ "error_info": "Error info: {error}",
175
+ "unknown_status": "โ“ Unknown status",
176
+ "test_execution_failed": "Test execution failed",
177
+ "no_test_types_enabled": "Error: No test types enabled"
178
+ }
179
+ }
180
+ }
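The UI code above resolves these strings with calls like `get_text(language, "config.target_url")`, using dotted key paths into this JSON. The helper itself is not part of this diff, so the sketch below is only a plausible shape for it: the file path and function name come from the commit, while the dotted-key resolution, caching, and en-US fallback are assumptions.

```python
# Hypothetical sketch of a get_text() helper compatible with the calls above.
import json
from functools import lru_cache
from pathlib import Path

I18N_PATH = Path(__file__).parent / "gradio_i18n.json"  # app_gradio/gradio_i18n.json

@lru_cache(maxsize=None)
def _load_i18n() -> dict:
    with open(I18N_PATH, encoding="utf-8") as f:
        return json.load(f)

def get_text(language: str, key: str):
    """Resolve a dotted key such as 'config.target_url' for the given UI language."""
    table = _load_i18n().get(language) or _load_i18n()["en-US"]  # assumed fallback
    node = table
    for part in key.split("."):
        if not isinstance(node, dict) or part not in node:
            return key  # assumed behavior: fall back to the raw key when missing
        node = node[part]
    return node
```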
config/config.yaml.example CHANGED
@@ -1,13 +1,13 @@
1
  target:
2
  url: https://baidu.com
3
- description: ๆต‹่ฏ•ๆœ็ดขๅŠŸ่ƒฝ
4
- # max_concurrent_tests: 2 # ๅฏ้€‰๏ผŒ้ป˜่ฎคๅนถ่กŒ2
5
 
6
- test_config: # ๆต‹่ฏ•้กน้…็ฝฎ
7
  function_test:
8
  enabled: True
9
  type: ai # default or ai
10
- business_objectives: ๆต‹่ฏ•็™พๅบฆๆœ็ดขๅŠŸ่ƒฝ๏ผŒ็”Ÿๆˆ3ไธช็”จไพ‹
11
  ux_test:
12
  enabled: True
13
  performance_test:
@@ -15,18 +15,21 @@ test_config: # ๆต‹่ฏ•้กน้…็ฝฎ
15
  security_test:
16
  enabled: False
17
 
18
- llm_config: # ่ง†่ง‰ๆจกๅž‹้…็ฝฎ๏ผŒๅฝ“ๅ‰ไป…ๆ”ฏๆŒ OpenAI sdkๆ ผๅผๅ…ผๅฎน
19
- model: gpt-4.1 # ๆŽจ่ไฝฟ็”จ
20
  api_key: your_api_key
21
  base_url: https://api.example.com/v1
22
- temperature: 0.1 # ๅฏ้€‰๏ผŒ้ป˜่ฎค 0.1
23
- # top_p: 0.9 # ๅฏ้€‰๏ผŒไธ่ฎพ็ฝฎๅˆ™ไธไผ ่ฏฅๅ‚ๆ•ฐ
24
 
25
  browser_config:
26
  viewport: {"width": 1280, "height": 720}
27
- headless: False # Docker็Žฏๅขƒไผš่‡ชๅŠจ่ฆ†็›–ไธบTrue
28
  language: zh-CN
29
  cookies: []
30
 
 
 
 
31
  log:
32
- level: info
 
1
  target:
2
  url: https://baidu.com
3
+ description: Test search functionality
4
+ # max_concurrent_tests: 2 # Optional, defaults to 2 parallel tests
5
 
6
+ test_config: # Test configuration
7
  function_test:
8
  enabled: True
9
  type: ai # default or ai
10
+ business_objectives: Test Baidu search functionality, generate 3 test cases
11
  ux_test:
12
  enabled: True
13
  performance_test:
 
15
  security_test:
16
  enabled: False
17
 
18
+ llm_config: # LLM configuration; currently only OpenAI SDK-compatible APIs are supported
19
+ model: gpt-4.1 # Recommended
20
  api_key: your_api_key
21
  base_url: https://api.example.com/v1
22
+ temperature: 0.1 # Optional, default 0.1
23
+ # top_p: 0.9 # Optional; omitted from the request if not set
24
 
25
  browser_config:
26
  viewport: {"width": 1280, "height": 720}
27
+ headless: False # Docker environment will automatically override to True
28
  language: zh-CN
29
  cookies: []
30
 
31
+ report:
32
+ language: en-US # zh-CN, en-US
33
+
34
  log:
35
+ level: info
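The new `report` block is consumed in webqa-agent.py later in this commit via `cfg.get("report", {"language": "en-US"})` and passed to `ParallelMode.run` as `report_cfg`. A minimal illustration of how the setting resolves for this example file (the local filename `config.yaml` is an assumption):

```python
# Illustration only: how report.language is read, mirroring webqa-agent.py in this commit.
import yaml

with open("config.yaml", encoding="utf-8") as f:  # assumes the example was saved as config.yaml
    cfg = yaml.safe_load(f)

report_cfg = cfg.get("report", {"language": "en-US"})  # same fallback as webqa-agent.py
print(report_cfg.get("language", "en-US"))             # "en-US" for this example file
```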
docker-compose.yml CHANGED
@@ -5,19 +5,19 @@ services:
5
  image: mmmay0722/webqa-agent:latest
6
  container_name: webqa-agent
7
  volumes:
8
- # ๆŒ‚่ฝฝ้…็ฝฎๆ–‡ไปถ
9
  - ./config:/app/config:ro
10
- # ๆŒ‚่ฝฝๆ—ฅๅฟ—็›ฎๅฝ•
11
  - ./logs:/app/logs
12
- # ๆŒ‚่ฝฝๆต‹่ฏ•็ป“ๆžœ็›ฎๅฝ•
13
  - ./reports:/app/reports
14
  environment:
15
- - OPENAI_API_KEY=${OPENAI_API_KEY} #ไผ˜ๅ…ˆไฝฟ็”จ็Žฏๅขƒๅ˜้‡
16
  - OPENAI_BASE_URL=${OPENAI_BASE_URL}
17
  - PYTHONPATH=/app
18
  - PYTHONUNBUFFERED=1
19
- - DOCKER_ENV=true # ๆ ‡่ฏ†Docker็Žฏๅขƒ๏ผŒๅผบๅˆถheadlessๆจกๅผ
20
- restart: "no" # ๅผ€ๅ‘ๆจกๅผไธ่‡ชๅŠจ้‡ๅฏ๏ผŒไพฟไบŽ่ฐƒ่ฏ•
21
  networks:
22
  - app-network
23
 
 
5
  image: mmmay0722/webqa-agent:latest
6
  container_name: webqa-agent
7
  volumes:
8
+ # Mount configuration file
9
  - ./config:/app/config:ro
10
+ # Mount logs directory
11
  - ./logs:/app/logs
12
+ # Mount test results directory
13
  - ./reports:/app/reports
14
  environment:
15
+ - OPENAI_API_KEY=${OPENAI_API_KEY} # Environment variable takes priority over the config file
16
  - OPENAI_BASE_URL=${OPENAI_BASE_URL}
17
  - PYTHONPATH=/app
18
  - PYTHONUNBUFFERED=1
19
+ - DOCKER_ENV=true # Marks the Docker environment and forces headless mode
20
+ restart: "no" # Development mode no auto-restart for easier debugging
21
  networks:
22
  - app-network
23
 
start.sh CHANGED
@@ -1,22 +1,22 @@
1
  #!/bin/bash
2
 
3
- # WebQA Agent Docker ๅฏๅŠจ่„šๆœฌ
4
- # ๆ”ฏๆŒๆœฌๅœฐๅ’Œ่ฟœ็จ‹้ƒจ็ฝฒๆจกๅผ
5
 
6
  set -e
7
 
8
  REPO_BASE_URL="https://raw.githubusercontent.com/MigoXLab/webqa-agent"
9
  BRANCH="${WEBQA_BRANCH:-main}"
10
 
11
- echo "๐Ÿš€ ๅฏๅŠจ WebQA Agent Docker ๅฎนๅ™จ..."
12
 
13
- # ๅˆ›ๅปบๅฟ…่ฆ็›ฎๅฝ•
14
  mkdir -p config logs reports
15
 
16
- # ๆฃ€ๆŸฅ้…็ฝฎๆ–‡ไปถๆ˜ฏๅฆๅญ˜ๅœจ
17
  if [ ! -f "config.yaml" ] && [ ! -f "config/config.yaml" ]; then
18
- echo "โŒ ้…็ฝฎๆ–‡ไปถไธๅญ˜ๅœจ"
19
- echo "่ฏทๅ…ˆไธ‹่ฝฝ้…็ฝฎๆ–‡ไปถๆจกๆฟ๏ผš"
20
  if [ "$BRANCH" = "main" ]; then
21
  echo "curl -fsSL https://raw.githubusercontent.com/MigoXLab/webqa-agent/main/config/config.yaml.example -o config.yaml"
22
  else
@@ -25,87 +25,87 @@ if [ ! -f "config.yaml" ] && [ ! -f "config/config.yaml" ]; then
25
  exit 1
26
  fi
27
 
28
- # ไธ‹่ฝฝ docker-compose.yml๏ผˆๅฆ‚ๆžœไธๅญ˜ๅœจ๏ผ‰
29
  if [ ! -f "docker-compose.yml" ]; then
30
- echo "๐Ÿ“ฅ ไธ‹่ฝฝ docker-compose.yml..."
31
  curl -fsSL "$REPO_BASE_URL/$BRANCH/docker-compose.yml" -o docker-compose.yml || {
32
- echo "โŒ ไธ‹่ฝฝ docker-compose.yml ๅคฑ่ดฅ"
33
  exit 1
34
  }
35
  fi
36
 
37
- # ็กฎๅฎš้…็ฝฎๆ–‡ไปถ่ทฏๅพ„
38
  if [ -f "config.yaml" ]; then
39
  CONFIG_FILE="config.yaml"
40
- echo "โœ… ๆ‰พๅˆฐ้…็ฝฎๆ–‡ไปถ: config.yaml"
41
  elif [ -f "config/config.yaml" ]; then
42
  CONFIG_FILE="config/config.yaml"
43
- echo "โœ… ๆ‰พๅˆฐ้…็ฝฎๆ–‡ไปถ: config/config.yaml"
44
  else
45
- echo "โŒ ้”™่ฏฏ: ้…็ฝฎๆ–‡ไปถไธๅญ˜ๅœจ"
46
  exit 1
47
  fi
48
 
49
- # ็ฎ€ๅŒ–้…็ฝฎ้ชŒ่ฏ
50
- echo "๐Ÿ” ้ชŒ่ฏ้…็ฝฎๆ–‡ไปถ..."
51
 
52
- # ๆฃ€ๆŸฅ YAML ่ฏญๆณ•๏ผˆไผ˜ๅ…ˆไฝฟ็”จ yq๏ผŒๅ…ถๆฌกไฝฟ็”จ Python+PyYAML๏ผ‰
53
  YAML_STATUS=0
54
  if command -v yq >/dev/null 2>&1; then
55
  if ! yq eval '.' "$CONFIG_FILE" >/dev/null 2>&1; then
56
- echo "โŒ ้…็ฝฎๆ–‡ไปถYAML่ฏญๆณ•้”™่ฏฏ (yqๆฃ€ๆŸฅ)"
57
  YAML_STATUS=1
58
  fi
59
  elif python3 -c "import yaml" >/dev/null 2>&1; then
60
  if ! python3 -c "import yaml; yaml.safe_load(open('$CONFIG_FILE'))" >/dev/null 2>&1; then
61
- echo "โŒ ้…็ฝฎๆ–‡ไปถYAML่ฏญๆณ•้”™่ฏฏ (PyYAMLๆฃ€ๆŸฅ)"
62
  YAML_STATUS=1
63
  fi
64
  else
65
- echo "โš ๏ธ ่ทณ่ฟ‡YAML่ฏญๆณ•ๆฃ€ๆŸฅ (ๆœชๅฎ‰่ฃ…yqๆˆ–PyYAML)"
66
  fi
67
 
68
  if [ $YAML_STATUS -ne 0 ]; then
69
  exit 1
70
  fi
71
 
72
- # ๅŸบๆœฌๅญ—ๆฎตๆฃ€ๆŸฅ
73
  if ! grep -q "url:" "$CONFIG_FILE"; then
74
- echo "โŒ ๆœชๆ‰พๅˆฐ target.url ้…็ฝฎ"
75
  exit 1
76
  fi
77
 
78
  if ! grep -q "llm_config:" "$CONFIG_FILE"; then
79
- echo "โŒ ๆœชๆ‰พๅˆฐ llm_config ้…็ฝฎ"
80
  exit 1
81
  fi
82
 
83
  if ! grep -q "test_config:" "$CONFIG_FILE"; then
84
- echo "โŒ ๆœชๆ‰พๅˆฐ test_config ้…็ฝฎ"
85
  exit 1
86
  fi
87
 
88
- # ๆฃ€ๆŸฅๆ˜ฏๅฆๆœ‰ๅฏ็”จ็š„ๆต‹่ฏ• (ๆ”ฏๆŒ True/true)
89
  if ! grep -i "enabled: *true" "$CONFIG_FILE"; then
90
- echo "โŒ ๆ‰€ๆœ‰ๆต‹่ฏ•้ƒฝๅทฒ็ฆ็”จ๏ผŒ่ฏท่‡ณๅฐ‘ๅฏ็”จไธ€ไธชๆต‹่ฏ•้กน"
91
  exit 1
92
  fi
93
 
94
- # ๆฃ€ๆŸฅ็Žฏๅขƒๅ˜้‡ๆˆ–้…็ฝฎๆ–‡ไปถไธญ็š„API Key
95
  if [ -z "$OPENAI_API_KEY" ] && ! grep -q "api_key:" "$CONFIG_FILE"; then
96
- echo "โŒ LLM API Key ๆœช้…็ฝฎ (้œ€่ฆ็Žฏๅขƒๅ˜้‡ OPENAI_API_KEY ๆˆ–้…็ฝฎๆ–‡ไปถไธญ็š„ llm_config.api_key)"
97
  exit 1
98
  fi
99
 
100
- echo "โœ… ๅŸบๆœฌ้…็ฝฎๆฃ€ๆŸฅ้€š่ฟ‡"
101
 
102
- # ๅˆ›ๅปบๅฟ…่ฆ็š„็›ฎๅฝ•
103
  mkdir -p logs reports
104
 
105
- # ๅฏๅŠจๅฎนๅ™จ
106
- echo "๐Ÿš€ ๅฏๅŠจๅฎนๅ™จ..."
107
  docker-compose up
108
 
109
- echo "โœ… ๅฎนๅ™จๅฏๅŠจๅฎŒๆˆ๏ผ"
110
- echo "๐Ÿ“‹ ๆŸฅ็œ‹ๆ—ฅๅฟ—: docker-compose logs -f"
111
- echo "๐Ÿ›‘ ๅœๆญขๆœๅŠก: docker-compose down"
 
1
  #!/bin/bash
2
 
3
+ # WebQA Agent Docker startup script
4
+ # Supports local and remote deployment modes
5
 
6
  set -e
7
 
8
  REPO_BASE_URL="https://raw.githubusercontent.com/MigoXLab/webqa-agent"
9
  BRANCH="${WEBQA_BRANCH:-main}"
10
 
11
+ echo "๐Ÿš€ Starting WebQA Agent Docker container..."
12
 
13
+ # Create necessary directories
14
  mkdir -p config logs reports
15
 
16
+ # Check if configuration file exists
17
  if [ ! -f "config.yaml" ] && [ ! -f "config/config.yaml" ]; then
18
+ echo "โŒ Configuration file not found"
19
+ echo "Please download configuration file template first:"
20
  if [ "$BRANCH" = "main" ]; then
21
  echo "curl -fsSL https://raw.githubusercontent.com/MigoXLab/webqa-agent/main/config/config.yaml.example -o config.yaml"
22
  else
 
25
  exit 1
26
  fi
27
 
28
+ # Download docker-compose.yml (if not exists)
29
  if [ ! -f "docker-compose.yml" ]; then
30
+ echo "๐Ÿ“ฅ Downloading docker-compose.yml..."
31
  curl -fsSL "$REPO_BASE_URL/$BRANCH/docker-compose.yml" -o docker-compose.yml || {
32
+ echo "โŒ Failed to download docker-compose.yml"
33
  exit 1
34
  }
35
  fi
36
 
37
+ # Determine configuration file path
38
  if [ -f "config.yaml" ]; then
39
  CONFIG_FILE="config.yaml"
40
+ echo "โœ… Found configuration file: config.yaml"
41
  elif [ -f "config/config.yaml" ]; then
42
  CONFIG_FILE="config/config.yaml"
43
+ echo "โœ… Found configuration file: config/config.yaml"
44
  else
45
+ echo "โŒ Error: Configuration file not found"
46
  exit 1
47
  fi
48
 
49
+ # Simplified configuration validation
50
+ echo "๐Ÿ” Validating configuration file..."
51
 
52
+ # Check YAML syntax (prefer yq, fallback to Python+PyYAML)
53
  YAML_STATUS=0
54
  if command -v yq >/dev/null 2>&1; then
55
  if ! yq eval '.' "$CONFIG_FILE" >/dev/null 2>&1; then
56
+ echo "โŒ Configuration file YAML syntax error (yq check)"
57
  YAML_STATUS=1
58
  fi
59
  elif python3 -c "import yaml" >/dev/null 2>&1; then
60
  if ! python3 -c "import yaml; yaml.safe_load(open('$CONFIG_FILE'))" >/dev/null 2>&1; then
61
+ echo "โŒ Configuration file YAML syntax error (PyYAML check)"
62
  YAML_STATUS=1
63
  fi
64
  else
65
+ echo "โš ๏ธ Skipping YAML syntax check (yq or PyYAML not installed)"
66
  fi
67
 
68
  if [ $YAML_STATUS -ne 0 ]; then
69
  exit 1
70
  fi
71
 
72
+ # Basic field checks
73
  if ! grep -q "url:" "$CONFIG_FILE"; then
74
+ echo "โŒ target.url configuration not found"
75
  exit 1
76
  fi
77
 
78
  if ! grep -q "llm_config:" "$CONFIG_FILE"; then
79
+ echo "โŒ llm_config configuration not found"
80
  exit 1
81
  fi
82
 
83
  if ! grep -q "test_config:" "$CONFIG_FILE"; then
84
+ echo "โŒ test_config configuration not found"
85
  exit 1
86
  fi
87
 
88
+ # Check if any tests are enabled (supports True/true)
89
  if ! grep -i "enabled: *true" "$CONFIG_FILE"; then
90
+ echo "โŒ All tests are disabled, please enable at least one test"
91
  exit 1
92
  fi
93
 
94
+ # Check API Key in environment variables or configuration file
95
  if [ -z "$OPENAI_API_KEY" ] && ! grep -q "api_key:" "$CONFIG_FILE"; then
96
+ echo "โŒ LLM API Key not configured (requires environment variable OPENAI_API_KEY or llm_config.api_key in config file)"
97
  exit 1
98
  fi
99
 
100
+ echo "โœ… Basic configuration check passed"
101
 
102
+ # Create necessary directories
103
  mkdir -p logs reports
104
 
105
+ # Start container
106
+ echo "๐Ÿš€ Starting container..."
107
  docker-compose up
108
 
109
+ echo "โœ… Container startup completed!"
110
+ echo "๐Ÿ“‹ View logs: docker-compose logs -f"
111
+ echo "๐Ÿ›‘ Stop service: docker-compose down"
webqa-agent.py CHANGED
@@ -14,48 +14,48 @@ from webqa_agent.executor import ParallelMode
14
 
15
 
16
  def find_config_file(args_config=None):
17
- """ๆ™บ่ƒฝๆŸฅๆ‰พ้…็ฝฎๆ–‡ไปถ."""
18
- # 1. ๅ‘ฝไปค่กŒๅ‚ๆ•ฐไผ˜ๅ…ˆ็บงๆœ€้ซ˜
19
  if args_config:
20
  if os.path.isfile(args_config):
21
- print(f"โœ… ไฝฟ็”จๆŒ‡ๅฎš้…็ฝฎๆ–‡ไปถ: {args_config}")
22
  return args_config
23
  else:
24
- raise FileNotFoundError(f"โŒ ๆŒ‡ๅฎš็š„้…็ฝฎๆ–‡ไปถไธๅญ˜ๅœจ: {args_config}")
25
 
26
- # 2. ๆŒ‰ไผ˜ๅ…ˆ็บงๆœ็ดข้ป˜่ฎคไฝ็ฝฎ
27
  current_dir = os.getcwd()
28
  script_dir = os.path.dirname(os.path.abspath(__file__))
29
 
30
  default_paths = [
31
- os.path.join(current_dir, "config", "config.yaml"), # ๅฝ“ๅ‰็›ฎๅฝ•ไธ‹็š„config
32
- os.path.join(script_dir, "config", "config.yaml"), # ่„šๆœฌ็›ฎๅฝ•ไธ‹็š„config
33
- os.path.join(current_dir, "config.yaml"), # ๅฝ“ๅ‰็›ฎๅฝ•ๅ…ผๅฎนไฝ็ฝฎ
34
- os.path.join(script_dir, "config.yaml"), # ่„šๆœฌ็›ฎๅฝ•ๅ…ผๅฎนไฝ็ฝฎ
35
- "/app/config/config.yaml", # Dockerๅฎนๅ™จๅ†…็ปๅฏน่ทฏๅพ„
36
  ]
37
 
38
  for path in default_paths:
39
  if os.path.isfile(path):
40
- print(f"โœ… ่‡ชๅŠจๅ‘็Žฐ้…็ฝฎๆ–‡ไปถ: {path}")
41
  return path
42
 
43
- # ๅฆ‚ๆžœ้ƒฝๆ‰พไธๅˆฐ๏ผŒ็ป™ๅ‡บๆธ…ๆ™ฐ็š„้”™่ฏฏไฟกๆฏ
44
- print("โŒ ๆœชๆ‰พๅˆฐ้…็ฝฎๆ–‡ไปถ๏ผŒ่ฏทๆฃ€ๆŸฅไปฅไธ‹ไฝ็ฝฎ:")
45
  for path in default_paths:
46
  print(f" - {path}")
47
- raise FileNotFoundError("้…็ฝฎๆ–‡ไปถไธๅญ˜ๅœจ")
48
 
49
 
50
  def load_yaml(path):
51
  if not os.path.isfile(path):
52
- print(f"[ERROR] ้…็ฝฎๆ–‡ไปถไธๅญ˜ๅœจ: {path}", file=sys.stderr)
53
  sys.exit(1)
54
  try:
55
  with open(path, "r", encoding="utf-8") as f:
56
  return yaml.safe_load(f)
57
  except Exception as e:
58
- print(f"[ERROR] ่ฏปๅ– YAML ๅคฑ่ดฅ: {e}", file=sys.stderr)
59
  sys.exit(1)
60
 
61
 
@@ -64,34 +64,34 @@ async def check_playwright_browsers_async():
64
  async with async_playwright() as p:
65
  browser = await p.chromium.launch(headless=True)
66
  await browser.close()
67
- print("โœ… Playwright ๆต่งˆๅ™จๅฏ็”จ๏ผˆAsync API ๅฏๅŠจๆˆๅŠŸ๏ผ‰")
68
  return True
69
  except PlaywrightError as e:
70
- print(f"โš ๏ธ Playwright ๆต่งˆๅ™จไธๅฏ็”จ๏ผˆAsync API ๅคฑ่ดฅ๏ผ‰๏ผš{e}")
71
  return False
72
  except Exception as e:
73
- print(f"โŒ ๆฃ€ๆŸฅ Playwright ๅผ‚ๅธธ๏ผš{e}")
74
  return False
75
 
76
 
77
  def check_lighthouse_installation():
78
- """ๆฃ€ๆŸฅ Lighthouse ๆ˜ฏๅฆๆญฃ็กฎๅฎ‰่ฃ…."""
79
- # ่Žทๅ–้กน็›ฎๆ น็›ฎๅฝ•ๅ’Œๅฝ“ๅ‰ๅทฅไฝœ็›ฎๅฝ•
80
  script_dir = os.path.dirname(os.path.abspath(__file__))
81
  current_dir = os.getcwd()
82
 
83
- # ๅˆคๆ–ญๆ“ไฝœ็ณป็ปŸ็ฑปๅž‹๏ผŒWindowsไธ‹lighthouseๆ˜ฏ.cmdๆ–‡ไปถ
84
  is_windows = os.name == "nt"
85
  lighthouse_exe = "lighthouse.cmd" if is_windows else "lighthouse"
86
 
87
- # ๅฏ่ƒฝ็š„lighthouse่ทฏๅพ„๏ผˆๆœฌๅœฐๅฎ‰่ฃ…ไผ˜ๅ…ˆ๏ผ‰
88
  lighthouse_paths = [
89
- os.path.join(current_dir, "node_modules", ".bin", lighthouse_exe), # ๅฝ“ๅ‰็›ฎๅฝ•ๆœฌๅœฐๅฎ‰่ฃ…
90
- os.path.join(script_dir, "node_modules", ".bin", lighthouse_exe), # ่„šๆœฌ็›ฎๅฝ•ๆœฌๅœฐๅฎ‰่ฃ…
91
- "lighthouse", # ๅ…จๅฑ€ๅฎ‰่ฃ…่ทฏๅพ„๏ผˆๅ…œๅบ•๏ผ‰
92
  ]
93
 
94
- # ๅชๅœจ้žWindows็Žฏๅขƒไธ‹ๆทปๅŠ Docker่ทฏๅพ„
95
  if not is_windows:
96
  lighthouse_paths.insert(-1, os.path.join("/app", "node_modules", ".bin", "lighthouse"))
97
 
@@ -100,8 +100,8 @@ def check_lighthouse_installation():
100
  result = subprocess.run([lighthouse_path, "--version"], capture_output=True, text=True, timeout=10)
101
  if result.returncode == 0:
102
  version = result.stdout.strip()
103
- path_type = "ๆœฌๅœฐๅฎ‰่ฃ…" if "node_modules" in lighthouse_path else "ๅ…จๅฑ€ๅฎ‰่ฃ…"
104
- print(f"โœ… Lighthouse ๅฎ‰่ฃ…ๆˆๅŠŸ๏ผŒ็‰ˆๆœฌ๏ผš{version} ({path_type})")
105
  return True
106
  except subprocess.TimeoutExpired:
107
  continue
@@ -110,59 +110,59 @@ def check_lighthouse_installation():
110
  except Exception:
111
  continue
112
 
113
- print("โŒ Lighthouse ๆœชๆ‰พๅˆฐ๏ผŒๅทฒๆฃ€ๆŸฅ่ทฏๅพ„:")
114
  for path in lighthouse_paths:
115
  print(f" - {path}")
116
- print("่ฏท็กฎ่ฎค Lighthouse ๅทฒๆญฃ็กฎๅฎ‰่ฃ…๏ผš`npm install lighthouse chrome-launcher`")
117
  return False
118
 
119
 
120
  def check_nuclei_installation():
121
- """ๆฃ€ๆŸฅ Nuclei ๆ˜ฏๅฆๆญฃ็กฎๅฎ‰่ฃ…."""
122
  try:
123
- # ๆฃ€ๆŸฅ nuclei ๅ‘ฝไปคๆ˜ฏๅฆๅฏ็”จ
124
  result = subprocess.run(["nuclei", "-version"], capture_output=True, text=True, timeout=10)
125
  if result.returncode == 0:
126
  version = result.stdout.strip()
127
- print(f"โœ… Nuclei ๅฎ‰่ฃ…ๆˆๅŠŸ๏ผŒ็‰ˆๆœฌ๏ผš{version}")
128
  return True
129
  else:
130
- print(f"โš ๏ธ Nuclei ๅ‘ฝไปคๆ‰ง่กŒๅคฑ่ดฅ๏ผš{result.stderr}")
131
  return False
132
  except subprocess.TimeoutExpired:
133
- print("โŒ Nuclei ๆฃ€ๆŸฅ่ถ…ๆ—ถ")
134
  return False
135
  except FileNotFoundError:
136
- print("โŒ Nuclei ๆœชๅฎ‰่ฃ…ๆˆ–ไธๅœจ PATH ไธญ")
137
  return False
138
  except Exception as e:
139
- print(f"โŒ ๆฃ€ๆŸฅ Nuclei ๅผ‚ๅธธ๏ผš{e}")
140
  return False
141
 
142
 
143
  def validate_and_build_llm_config(cfg):
144
- """้ชŒ่ฏๅนถๆž„ๅปบLLM้…็ฝฎ๏ผŒ็Žฏๅขƒๅ˜้‡ไผ˜ๅ…ˆไบŽ้…็ฝฎๆ–‡ไปถ."""
145
- # ไปŽ้…็ฝฎๆ–‡ไปถ่ฏปๅ–
146
  llm_cfg_raw = cfg.get("llm_config", {})
147
 
148
- # ็Žฏๅขƒๅ˜้‡ไผ˜ๅ…ˆไบŽ้…็ฝฎๆ–‡ไปถ
149
  api_key = os.getenv("OPENAI_API_KEY") or llm_cfg_raw.get("api_key", "")
150
  base_url = os.getenv("OPENAI_BASE_URL") or llm_cfg_raw.get("base_url", "")
151
  model = llm_cfg_raw.get("model", "gpt-4o-mini")
152
- # ้‡‡ๆ ท้…็ฝฎ๏ผš้ป˜่ฎค temperature ไธบ 0.1๏ผ›top_p ้ป˜่ฎคไธ่ฎพ็ฝฎ
153
  temperature = llm_cfg_raw.get("temperature", 0.1)
154
  top_p = llm_cfg_raw.get("top_p")
155
 
156
- # ้ชŒ่ฏๅฟ…ๅกซๅญ—ๆฎต
157
  if not api_key:
158
  raise ValueError(
159
- "โŒ LLM API Key ๆœช้…็ฝฎ๏ผ่ฏท่ฎพ็ฝฎไปฅไธ‹ไน‹ไธ€๏ผš\n"
160
- " - ็Žฏๅขƒๅ˜้‡: OPENAI_API_KEY\n"
161
- " - ้…็ฝฎๆ–‡ไปถ: llm_config.api_key"
162
  )
163
 
164
  if not base_url:
165
- print("โš ๏ธ ๆœช่ฎพ็ฝฎ base_url๏ผŒๅฐ†ไฝฟ็”จ OpenAI ้ป˜่ฎคๅœฐๅ€")
166
  base_url = "https://api.openai.com/v1"
167
 
168
  llm_config = {
@@ -175,14 +175,14 @@ def validate_and_build_llm_config(cfg):
175
  if top_p is not None:
176
  llm_config["top_p"] = top_p
177
 
178
- # ๆ˜พ็คบ้…็ฝฎๆฅๆบ๏ผˆ้š่—ๆ•ๆ„Ÿไฟกๆฏ๏ผ‰
179
  api_key_masked = f"{api_key[:8]}...{api_key[-4:]}" if len(api_key) > 12 else "***"
180
  env_api_key = bool(os.getenv("OPENAI_API_KEY"))
181
  env_base_url = bool(os.getenv("OPENAI_BASE_URL"))
182
 
183
- print("โœ… LLM้…็ฝฎ้ชŒ่ฏๆˆๅŠŸ:")
184
- print(f" - API Key: {api_key_masked} ({'็Žฏๅขƒๅ˜้‡' if env_api_key else '้…็ฝฎๆ–‡ไปถ'})")
185
- print(f" - Base URL: {base_url} ({'็Žฏๅขƒๅ˜้‡' if env_base_url else '้…็ฝฎๆ–‡ไปถ/้ป˜่ฎค'})")
186
  print(f" - Model: {model}")
187
  print(f" - Temperature: {temperature}")
188
  if top_p is not None:
@@ -195,12 +195,12 @@ def build_test_configurations(cfg, cookies=None):
195
  tests = []
196
  tconf = cfg.get("test_config", {})
197
 
198
- # Docker็Žฏๅขƒๆฃ€ๆต‹๏ผšๅผบๅˆถheadlessๆจกๅผ
199
  is_docker = os.getenv("DOCKER_ENV") == "true"
200
  config_headless = cfg.get("browser_config", {}).get("headless", True)
201
 
202
  if is_docker and not config_headless:
203
- print("โš ๏ธ ๆฃ€ๆต‹ๅˆฐDocker็Žฏๅขƒ๏ผŒๅผบๅˆถๅฏ็”จheadlessๆจกๅผ")
204
  headless = True
205
  else:
206
  headless = config_headless
@@ -217,7 +217,6 @@ def build_test_configurations(cfg, cookies=None):
217
  tests.append(
218
  {
219
  "test_type": "ui_agent_langgraph",
220
- "test_name": "ๆ™บ่ƒฝๅŠŸ่ƒฝๆต‹่ฏ•",
221
  "enabled": True,
222
  "browser_config": base_browser,
223
  "test_specific_config": {
@@ -229,19 +228,11 @@ def build_test_configurations(cfg, cookies=None):
229
  else:
230
  tests += [
231
  {
232
- "test_type": "button_test",
233
- "test_name": "้ๅކๆต‹่ฏ•",
234
  "enabled": True,
235
  "browser_config": base_browser,
236
  "test_specific_config": {},
237
- },
238
- {
239
- "test_type": "web_basic_check",
240
- "test_name": "ๆŠ€ๆœฏๅฅๅบทๅบฆๆฃ€ๆŸฅ",
241
- "enabled": True,
242
- "browser_config": base_browser,
243
- "test_specific_config": {},
244
- },
245
  ]
246
 
247
  # ux test
@@ -249,7 +240,6 @@ def build_test_configurations(cfg, cookies=None):
249
  tests.append(
250
  {
251
  "test_type": "ux_test",
252
- "test_name": "็”จๆˆทไฝ“้ชŒๆต‹่ฏ•",
253
  "enabled": True,
254
  "browser_config": base_browser,
255
  "test_specific_config": {},
@@ -261,7 +251,6 @@ def build_test_configurations(cfg, cookies=None):
261
  tests.append(
262
  {
263
  "test_type": "performance",
264
- "test_name": "ๆ€ง่ƒฝๆต‹่ฏ•",
265
  "enabled": True,
266
  "browser_config": base_browser,
267
  "test_specific_config": {},
@@ -273,7 +262,6 @@ def build_test_configurations(cfg, cookies=None):
273
  tests.append(
274
  {
275
  "test_type": "security",
276
- "test_name": "ๅฎ‰ๅ…จๆต‹่ฏ•",
277
  "enabled": True,
278
  "browser_config": base_browser,
279
  "test_specific_config": {},
@@ -284,35 +272,35 @@ def build_test_configurations(cfg, cookies=None):
284
 
285
 
286
  async def run_tests(cfg):
287
- # 0. ๆ˜พ็คบ่ฟ่กŒ็Žฏๅขƒไฟกๆฏ
288
  is_docker = os.getenv("DOCKER_ENV") == "true"
289
- print(f"๐Ÿƒ ่ฟ่กŒ็Žฏๅขƒ: {'Dockerๅฎนๅ™จ' if is_docker else 'ๆœฌๅœฐ็Žฏๅขƒ'}")
290
  if is_docker:
291
- print("๐Ÿณ Dockerๆจกๅผ๏ผš่‡ชๅŠจๅฏ็”จheadlessๆต่งˆๅ™จ")
292
 
293
- # 1. ๆ นๆฎ้…็ฝฎๆฃ€ๆŸฅๆ‰€้œ€ๅทฅๅ…ท
294
  tconf = cfg.get("test_config", {})
295
 
296
- # ๆ˜พ็คบๅฏ็”จ็š„ๆต‹่ฏ•็ฑปๅž‹
297
  enabled_tests = []
298
  if tconf.get("function_test", {}).get("enabled"):
299
  test_type = tconf.get("function_test", {}).get("type", "default")
300
- enabled_tests.append(f"ๅŠŸ่ƒฝๆต‹่ฏ•({test_type})")
301
  if tconf.get("ux_test", {}).get("enabled"):
302
- enabled_tests.append("็”จๆˆทไฝ“้ชŒๆต‹่ฏ•")
303
  if tconf.get("performance_test", {}).get("enabled"):
304
- enabled_tests.append("ๆ€ง่ƒฝๆต‹่ฏ•")
305
  if tconf.get("security_test", {}).get("enabled"):
306
- enabled_tests.append("ๅฎ‰ๅ…จๆต‹่ฏ•")
307
 
308
  if enabled_tests:
309
- print(f"๐Ÿ“‹ ๅฏ็”จ็š„ๆต‹่ฏ•็ฑปๅž‹: {', '.join(enabled_tests)}")
310
- print("๐Ÿ”ง ๆญฃๅœจๆ นๆฎ้…็ฝฎๆฃ€ๆŸฅๆ‰€้œ€ๅทฅๅ…ท...")
311
  else:
312
- print("โš ๏ธ ๆœชๅฏ็”จไปปไฝ•ๆต‹่ฏ•็ฑปๅž‹๏ผŒ่ฏทๆฃ€ๆŸฅ้…็ฝฎๆ–‡ไปถ")
313
  sys.exit(1)
314
 
315
- # ๆฃ€ๆŸฅๆ˜ฏๅฆ้œ€่ฆๆต่งˆๅ™จ๏ผˆๅคง้ƒจๅˆ†ๆต‹่ฏ•้ƒฝ้œ€่ฆ๏ผ‰
316
  needs_browser = any(
317
  [
318
  tconf.get("function_test", {}).get("enabled"),
@@ -323,85 +311,86 @@ async def run_tests(cfg):
323
  )
324
 
325
  if needs_browser:
326
- print("๐Ÿ” ๆฃ€ๆŸฅ Playwright ๆต่งˆๅ™จ...")
327
  ok = await check_playwright_browsers_async()
328
  if not ok:
329
- print("่ฏทๆ‰‹ๅŠจๆ‰ง่กŒ๏ผš`playwright install` ๆฅๅฎ‰่ฃ…ๆต่งˆๅ™จไบŒ่ฟ›ๅˆถ๏ผŒ็„ถๅŽ้‡่ฏ•ใ€‚", file=sys.stderr)
330
  sys.exit(1)
331
 
332
- # ๆฃ€ๆŸฅๆ˜ฏๅฆ้œ€่ฆ Lighthouse๏ผˆๆ€ง่ƒฝๆต‹่ฏ•๏ผ‰
333
  if tconf.get("performance_test", {}).get("enabled"):
334
- print("๐Ÿ” ๆฃ€ๆŸฅ Lighthouse ๅฎ‰่ฃ…...")
335
  lighthouse_ok = check_lighthouse_installation()
336
  if not lighthouse_ok:
337
- print("่ฏท็กฎ่ฎค Lighthouse ๅทฒๆญฃ็กฎๅฎ‰่ฃ…๏ผš`npm install lighthouse chrome-launcher`", file=sys.stderr)
338
  sys.exit(1)
339
 
340
- # ๆฃ€ๆŸฅๆ˜ฏๅฆ้œ€่ฆ Nuclei๏ผˆๅฎ‰ๅ…จๆต‹่ฏ•๏ผ‰
341
  if tconf.get("security_test", {}).get("enabled"):
342
- print("๐Ÿ” ๆฃ€ๆŸฅ Nuclei ๅฎ‰่ฃ…...")
343
  nuclei_ok = check_nuclei_installation()
344
  if not nuclei_ok:
345
- print("่ฏท็กฎ่ฎค Nuclei ๅทฒๆญฃ็กฎๅฎ‰่ฃ…ๅนถๅœจ PATH ไธญ", file=sys.stderr)
346
  sys.exit(1)
347
 
348
- # ้ชŒ่ฏๅ’Œๆž„ๅปบ LLM ้…็ฝฎ
349
  try:
350
  llm_config = validate_and_build_llm_config(cfg)
351
  except ValueError as e:
352
  print(f"[ERROR] {e}", file=sys.stderr)
353
  sys.exit(1)
354
 
355
- # ๆž„้€  test_configurations
356
  cookies = []
357
  test_configurations = build_test_configurations(cfg, cookies=cookies)
358
 
359
  target_url = cfg.get("target", {}).get("url", "")
360
 
361
- # ่ฐƒ็”จๆ‰ง่กŒๅ™จ
362
  try:
363
- # ไปŽ้…็ฝฎ่ฏปๅ–ๅนถ่กŒๅบฆ๏ผˆ้ป˜่ฎค2๏ผ‰๏ผŒๅ…่ฎธ็”จๆˆทๅœจ config.target.max_concurrent_tests ๆŒ‡ๅฎš
364
  raw_concurrency = cfg.get("target", {}).get("max_concurrent_tests", 2)
365
  try:
366
  max_concurrent_tests = int(raw_concurrency)
367
  if max_concurrent_tests < 1:
368
  raise ValueError
369
  except Exception:
370
- print(f"โš ๏ธ ๆ— ๆ•ˆ็š„ๅนถ่กŒ่ฎพ็ฝฎ: {raw_concurrency}๏ผŒๅทฒๅ›ž้€€ไธบ 2")
371
  max_concurrent_tests = 2
372
 
373
- print(f"โš™๏ธ ๅนถ่กŒๅบฆ: {max_concurrent_tests}")
374
 
375
  parallel_mode = ParallelMode([], max_concurrent_tests=max_concurrent_tests)
376
  results, report_path, html_report_path, result_count = await parallel_mode.run(
377
  url=target_url, llm_config=llm_config, test_configurations=test_configurations,
378
- log_cfg=cfg.get("log", {"level": "info"})
 
379
  )
380
  if result_count:
381
- print(f"๐Ÿ”ข ๆ€ป่ฏ„ไผฐๆ•ฐ๏ผš{result_count.get('total', 0)}")
382
- print(f"โœ… ๆˆๅŠŸๆ•ฐ๏ผš{result_count.get('passed', 0)}")
383
- print(f"โŒ ๅคฑ่ดฅๆ•ฐ๏ผš{result_count.get('failed', 0)}")
384
 
385
  if html_report_path:
386
- print("htmlๆŠฅๅ‘Š่ทฏๅพ„: ", html_report_path)
387
  else:
388
- print("htmlๆŠฅๅ‘Š็”Ÿๆˆๅคฑ่ดฅ")
389
  except Exception:
390
- print("ๆต‹่ฏ•ๆ‰ง่กŒๅคฑ่ดฅ๏ผŒๅ †ๆ ˆๅฆ‚ไธ‹๏ผš", file=sys.stderr)
391
  traceback.print_exc()
392
  sys.exit(1)
393
 
394
 
395
  def parse_args():
396
- parser = argparse.ArgumentParser(description="WebQA Agent ๆต‹่ฏ•ๅ…ฅๅฃ")
397
- parser.add_argument("--config", "-c", help="YAML ้…็ฝฎๆ–‡ไปถ่ทฏๅพ„ (ๅฏ้€‰๏ผŒ้ป˜่ฎค่‡ชๅŠจๆœ็ดข config/config.yaml)")
398
  return parser.parse_args()
399
 
400
 
401
  def main():
402
  args = parse_args()
403
 
404
- # ๆ™บ่ƒฝๆŸฅๆ‰พ้…็ฝฎๆ–‡ไปถ
405
  try:
406
  config_path = find_config_file(args.config)
407
  cfg = load_yaml(config_path)
@@ -409,7 +398,7 @@ def main():
409
  print(f"[ERROR] {e}", file=sys.stderr)
410
  sys.exit(1)
411
 
412
- # ่ฟ่กŒๆต‹่ฏ•
413
  asyncio.run(run_tests(cfg))
414
 
415
 
 
14
 
15
 
16
  def find_config_file(args_config=None):
17
+ """Intelligently find configuration file."""
18
+ # 1. Command line arguments have highest priority
19
  if args_config:
20
  if os.path.isfile(args_config):
21
+ print(f"โœ… Using specified config file: {args_config}")
22
  return args_config
23
  else:
24
+ raise FileNotFoundError(f"โŒ Specified config file not found: {args_config}")
25
 
26
+ # 2. Search default locations by priority
27
  current_dir = os.getcwd()
28
  script_dir = os.path.dirname(os.path.abspath(__file__))
29
 
30
  default_paths = [
31
+ os.path.join(current_dir, "config", "config.yaml"), # config in current directory
32
+ os.path.join(script_dir, "config", "config.yaml"), # config in script directory
33
+ os.path.join(current_dir, "config.yaml"), # compatible location in current directory
34
+ os.path.join(script_dir, "config.yaml"), # compatible location in script directory
35
+ "/app/config/config.yaml", # absolute path in Docker container
36
  ]
37
 
38
  for path in default_paths:
39
  if os.path.isfile(path):
40
+ print(f"โœ… Auto-discovered config file: {path}")
41
  return path
42
 
43
+ # If none found, provide clear error message
44
+ print("โŒ Config file not found, please check these locations:")
45
  for path in default_paths:
46
  print(f" - {path}")
47
+ raise FileNotFoundError("Config file does not exist")
48
 
49
 
50
  def load_yaml(path):
51
  if not os.path.isfile(path):
52
+ print(f"[ERROR] Config file not found: {path}", file=sys.stderr)
53
  sys.exit(1)
54
  try:
55
  with open(path, "r", encoding="utf-8") as f:
56
  return yaml.safe_load(f)
57
  except Exception as e:
58
+ print(f"[ERROR] Failed to read YAML: {e}", file=sys.stderr)
59
  sys.exit(1)
60
 
61
 
 
64
  async with async_playwright() as p:
65
  browser = await p.chromium.launch(headless=True)
66
  await browser.close()
67
+ print("โœ… Playwright browsers available (Async API startup successful)")
68
  return True
69
  except PlaywrightError as e:
70
+ print(f"โš ๏ธ Playwright browsers unavailable (Async API failed): {e}")
71
  return False
72
  except Exception as e:
73
+ print(f"โŒ Playwright check exception: {e}")
74
  return False
75
 
76
 
77
  def check_lighthouse_installation():
78
+ """Check if Lighthouse is properly installed."""
79
+ # Get project root directory and current working directory
80
  script_dir = os.path.dirname(os.path.abspath(__file__))
81
  current_dir = os.getcwd()
82
 
83
+ # Determine OS type, lighthouse is .cmd file on Windows
84
  is_windows = os.name == "nt"
85
  lighthouse_exe = "lighthouse.cmd" if is_windows else "lighthouse"
86
 
87
+ # Possible lighthouse paths (local installation priority)
88
  lighthouse_paths = [
89
+ os.path.join(current_dir, "node_modules", ".bin", lighthouse_exe), # local installation in current directory
90
+ os.path.join(script_dir, "node_modules", ".bin", lighthouse_exe), # local installation in script directory
91
+ "lighthouse", # global installation path (fallback)
92
  ]
93
 
94
+ # Add Docker path only in non-Windows environments
95
  if not is_windows:
96
  lighthouse_paths.insert(-1, os.path.join("/app", "node_modules", ".bin", "lighthouse"))
97
 
 
100
  result = subprocess.run([lighthouse_path, "--version"], capture_output=True, text=True, timeout=10)
101
  if result.returncode == 0:
102
  version = result.stdout.strip()
103
+ path_type = "Local installation" if "node_modules" in lighthouse_path else "Global installation"
104
+ print(f"โœ… Lighthouse installation successful, version: {version} ({path_type})")
105
  return True
106
  except subprocess.TimeoutExpired:
107
  continue
 
110
  except Exception:
111
  continue
112
 
113
+ print("โŒ Lighthouse not found, checked paths:")
114
  for path in lighthouse_paths:
115
  print(f" - {path}")
116
+ print("Please confirm Lighthouse is properly installed: `npm install lighthouse chrome-launcher`")
117
  return False
118
 
119
 
120
  def check_nuclei_installation():
121
+ """Check if Nuclei is properly installed."""
122
  try:
123
+ # Check if nuclei command is available
124
  result = subprocess.run(["nuclei", "-version"], capture_output=True, text=True, timeout=10)
125
  if result.returncode == 0:
126
  version = result.stdout.strip()
127
+ print(f"โœ… Nuclei installation successful, version: {version}")
128
  return True
129
  else:
130
+ print(f"โš ๏ธ Nuclei command execution failed: {result.stderr}")
131
  return False
132
  except subprocess.TimeoutExpired:
133
+ print("โŒ Nuclei check timeout")
134
  return False
135
  except FileNotFoundError:
136
+ print("โŒ Nuclei not installed or not in PATH")
137
  return False
138
  except Exception as e:
139
+ print(f"โŒ Nuclei check exception: {e}")
140
  return False
141
 
142
 
143
  def validate_and_build_llm_config(cfg):
144
+ """Validate and build LLM configuration, environment variables take priority over config file."""
145
+ # Read from config file
146
  llm_cfg_raw = cfg.get("llm_config", {})
147
 
148
+ # Environment variables take priority over config file
149
  api_key = os.getenv("OPENAI_API_KEY") or llm_cfg_raw.get("api_key", "")
150
  base_url = os.getenv("OPENAI_BASE_URL") or llm_cfg_raw.get("base_url", "")
151
  model = llm_cfg_raw.get("model", "gpt-4o-mini")
152
+ # Sampling configuration: default temperature is 0.1; top_p not set by default
153
  temperature = llm_cfg_raw.get("temperature", 0.1)
154
  top_p = llm_cfg_raw.get("top_p")
155
 
156
+ # Validate required fields
157
  if not api_key:
158
  raise ValueError(
159
+ "โŒ LLM API Key not configured! Please set one of the following:\n"
160
+ " - Environment variable: OPENAI_API_KEY\n"
161
+ " - Config file: llm_config.api_key"
162
  )
163
 
164
  if not base_url:
165
+ print("โš ๏ธ base_url not set, will use OpenAI default address")
166
  base_url = "https://api.openai.com/v1"
167
 
168
  llm_config = {
 
175
  if top_p is not None:
176
  llm_config["top_p"] = top_p
177
 
178
+ # Show configuration source (hide sensitive information)
179
  api_key_masked = f"{api_key[:8]}...{api_key[-4:]}" if len(api_key) > 12 else "***"
180
  env_api_key = bool(os.getenv("OPENAI_API_KEY"))
181
  env_base_url = bool(os.getenv("OPENAI_BASE_URL"))
182
 
183
+ print("โœ… LLM configuration validation successful:")
184
+ print(f" - API Key: {api_key_masked} ({'Environment variable' if env_api_key else 'Config file'})")
185
+ print(f" - Base URL: {base_url} ({'Environment variable' if env_base_url else 'Config file/Default'})")
186
  print(f" - Model: {model}")
187
  print(f" - Temperature: {temperature}")
188
  if top_p is not None:
 
195
  tests = []
196
  tconf = cfg.get("test_config", {})
197
 
198
+ # Docker environment detection: force headless mode
199
  is_docker = os.getenv("DOCKER_ENV") == "true"
200
  config_headless = cfg.get("browser_config", {}).get("headless", True)
201
 
202
  if is_docker and not config_headless:
203
+ print("โš ๏ธ Docker environment detected, forcing headless mode")
204
  headless = True
205
  else:
206
  headless = config_headless
 
217
  tests.append(
218
  {
219
  "test_type": "ui_agent_langgraph",
 
220
  "enabled": True,
221
  "browser_config": base_browser,
222
  "test_specific_config": {
 
228
  else:
229
  tests += [
230
  {
231
+ "test_type": "basic_test",
 
232
  "enabled": True,
233
  "browser_config": base_browser,
234
  "test_specific_config": {},
235
+ }
 
 
 
 
 
 
 
236
  ]
237
 
238
  # ux test
 
240
  tests.append(
241
  {
242
  "test_type": "ux_test",
 
243
  "enabled": True,
244
  "browser_config": base_browser,
245
  "test_specific_config": {},
 
251
  tests.append(
252
  {
253
  "test_type": "performance",
 
254
  "enabled": True,
255
  "browser_config": base_browser,
256
  "test_specific_config": {},
 
262
  tests.append(
263
  {
264
  "test_type": "security",
 
265
  "enabled": True,
266
  "browser_config": base_browser,
267
  "test_specific_config": {},
 
272
 
273
 
274
  async def run_tests(cfg):
275
+ # 0. Display runtime environment information
276
  is_docker = os.getenv("DOCKER_ENV") == "true"
277
+ print(f"๐Ÿƒ Runtime environment: {'Docker container' if is_docker else 'Local environment'}")
278
  if is_docker:
279
+ print("๐Ÿณ Docker mode: automatically enable headless browser")
280
 
281
+ # 1. Check required tools based on configuration
282
  tconf = cfg.get("test_config", {})
283
 
284
+ # Display enabled test types
285
  enabled_tests = []
286
  if tconf.get("function_test", {}).get("enabled"):
287
  test_type = tconf.get("function_test", {}).get("type", "default")
288
+ enabled_tests.append(f"Function Test ({test_type})")
289
  if tconf.get("ux_test", {}).get("enabled"):
290
+ enabled_tests.append("User Experience Test")
291
  if tconf.get("performance_test", {}).get("enabled"):
292
+ enabled_tests.append("Performance Test")
293
  if tconf.get("security_test", {}).get("enabled"):
294
+ enabled_tests.append("Security Test")
295
 
296
  if enabled_tests:
297
+ print(f"๐Ÿ“‹ Enabled test types: {', '.join(enabled_tests)}")
298
+ print("๐Ÿ”ง Checking required tools based on configuration...")
299
  else:
300
+ print("โš ๏ธ No test types enabled, please check configuration file")
301
  sys.exit(1)
302
 
303
+ # Check if browser is needed (most tests require it)
304
  needs_browser = any(
305
  [
306
  tconf.get("function_test", {}).get("enabled"),
 
311
  )
312
 
313
  if needs_browser:
314
+ print("๐Ÿ” Checking Playwright browsers...")
315
  ok = await check_playwright_browsers_async()
316
  if not ok:
317
+ print("Please manually run: `playwright install` to install browser binaries, then retry.", file=sys.stderr)
318
  sys.exit(1)
319
 
320
+ # Check if Lighthouse is needed (performance test)
321
  if tconf.get("performance_test", {}).get("enabled"):
322
+ print("๐Ÿ” Checking Lighthouse installation...")
323
  lighthouse_ok = check_lighthouse_installation()
324
  if not lighthouse_ok:
325
+ print("Please confirm Lighthouse is properly installed: `npm install lighthouse chrome-launcher`", file=sys.stderr)
326
  sys.exit(1)
327
 
328
+ # Check if Nuclei is needed (security test)
329
  if tconf.get("security_test", {}).get("enabled"):
330
+ print("๐Ÿ” Checking Nuclei installation...")
331
  nuclei_ok = check_nuclei_installation()
332
  if not nuclei_ok:
333
+ print("Please confirm Nuclei is properly installed and in PATH", file=sys.stderr)
334
  sys.exit(1)
335
 
336
+ # Validate and build LLM configuration
337
  try:
338
  llm_config = validate_and_build_llm_config(cfg)
339
  except ValueError as e:
340
  print(f"[ERROR] {e}", file=sys.stderr)
341
  sys.exit(1)
342
 
343
+ # Build test_configurations
344
  cookies = []
345
  test_configurations = build_test_configurations(cfg, cookies=cookies)
346
 
347
  target_url = cfg.get("target", {}).get("url", "")
348
 
349
+ # Call executor
350
  try:
351
+ # Read concurrency from config (default 2), allow users to specify in config.target.max_concurrent_tests
352
  raw_concurrency = cfg.get("target", {}).get("max_concurrent_tests", 2)
353
  try:
354
  max_concurrent_tests = int(raw_concurrency)
355
  if max_concurrent_tests < 1:
356
  raise ValueError
357
  except Exception:
358
+ print(f"โš ๏ธ Invalid concurrency setting: {raw_concurrency}, fallback to 2")
359
  max_concurrent_tests = 2
360
 
361
+ print(f"โš™๏ธ Concurrency: {max_concurrent_tests}")
362
 
363
  parallel_mode = ParallelMode([], max_concurrent_tests=max_concurrent_tests)
364
  results, report_path, html_report_path, result_count = await parallel_mode.run(
365
  url=target_url, llm_config=llm_config, test_configurations=test_configurations,
366
+ log_cfg=cfg.get("log", {"level": "info"}),
367
+ report_cfg=cfg.get("report", {"language": "en-US"})
368
  )
369
  if result_count:
370
+ print(f"๐Ÿ”ข Total evaluations: {result_count.get('total', 0)}")
371
+ print(f"โœ… Passed: {result_count.get('passed', 0)}")
372
+ print(f"โŒ Failed: {result_count.get('failed', 0)}")
373
 
374
  if html_report_path:
375
+ print("HTML report path: ", html_report_path)
376
  else:
377
+ print("HTML report generation failed")
378
  except Exception:
379
+ print("Test execution failed, stack trace:", file=sys.stderr)
380
  traceback.print_exc()
381
  sys.exit(1)
382
 
383
 
384
  def parse_args():
385
+ parser = argparse.ArgumentParser(description="WebQA Agent Test Entry Point")
386
+ parser.add_argument("--config", "-c", help="YAML configuration file path (optional, default auto-search config/config.yaml)")
387
  return parser.parse_args()
388
 
389
 
390
  def main():
391
  args = parse_args()
392
 
393
+ # Intelligently find configuration file
394
  try:
395
  config_path = find_config_file(args.config)
396
  cfg = load_yaml(config_path)
 
398
  print(f"[ERROR] {e}", file=sys.stderr)
399
  sys.exit(1)
400
 
401
+ # Run tests
402
  asyncio.run(run_tests(cfg))
403
 
404
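With these changes, a `function_test` of type `default` now produces a single `basic_test` entry instead of the former `button_test` + `web_basic_check` pair. A rough sketch of the resulting entry follows; the `browser_config` contents shown are only an illustrative subset, since the real `base_browser` is assembled from the YAML `browser_config` in code not shown in this hunk.

```python
# Illustrative only: shape of the default-mode function test entry after this commit.
base_browser = {"viewport": {"width": 1280, "height": 720}, "headless": True}  # assumed subset

default_function_tests = [
    {
        "test_type": "basic_test",
        "enabled": True,
        "browser_config": base_browser,
        "test_specific_config": {},
    }
]
```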
 
webqa_agent/data/test_structures.py CHANGED
@@ -7,13 +7,22 @@ from pydantic import BaseModel
7
  from webqa_agent.browser.config import DEFAULT_CONFIG
8
 
9
  # ไพง่พนๆ ๆ ‡้ข˜๏ผˆ้ป˜่ฎค๏ผ‰
10
- CATEGORY_TITLES: Dict[str, str] = {
11
- "function": "ๅŠŸ่ƒฝๆต‹่ฏ•",
12
- "ux": "UXๆต‹่ฏ•",
13
- "performance": "ๆ€ง่ƒฝๆต‹่ฏ•",
14
- "security": "ๅฎ‰ๅ…จๆต‹่ฏ•",
 
 
 
 
 
 
 
 
15
  }
16
 
 
17
  class TestCategory(str, Enum):
18
  FUNCTION = "function"
19
  UX = "ux"
@@ -25,11 +34,12 @@ class TestType(str, Enum):
25
  """Test type enumeration."""
26
 
27
  UNKNOWN = "unknown"
28
- BUTTON_TEST = "button_test"
 
29
  UI_AGENT_LANGGRAPH = "ui_agent_langgraph"
30
  UX_TEST = "ux_test"
31
  PERFORMANCE = "performance_test"
32
- WEB_BASIC_CHECK = "web_basic_check"
33
  SECURITY_TEST = "security_test"
34
  SEO_TEST = "seo_test"
35
 
@@ -37,8 +47,9 @@ def get_category_for_test_type(test_type: TestType) -> TestCategory:
37
  """Map TestType to TestCategory."""
38
  mapping = {
39
  TestType.UI_AGENT_LANGGRAPH: TestCategory.FUNCTION,
40
- TestType.BUTTON_TEST: TestCategory.FUNCTION,
41
- TestType.WEB_BASIC_CHECK: TestCategory.FUNCTION,
 
42
  TestType.UX_TEST: TestCategory.UX,
43
  TestType.PERFORMANCE: TestCategory.PERFORMANCE,
44
  TestType.SECURITY_TEST: TestCategory.SECURITY,
@@ -48,22 +59,35 @@ def get_category_for_test_type(test_type: TestType) -> TestCategory:
48
 
49
 
50
  # ๆŠฅๅ‘Šๅญๆ ‡้ข˜ๆ 
51
- TEST_TYPE_DEFAULT_NAMES: Dict[TestType, str] = {
52
- TestType.UI_AGENT_LANGGRAPH: "ๆ™บ่ƒฝๅŠŸ่ƒฝๆต‹่ฏ•",
53
- TestType.BUTTON_TEST: "้ๅކๆต‹่ฏ•",
54
- TestType.WEB_BASIC_CHECK: "ๆŠ€ๆœฏๅฅๅบทๅบฆๆฃ€ๆŸฅ",
55
- TestType.UX_TEST: "็”จๆˆทไฝ“้ชŒๆต‹่ฏ•",
56
- TestType.PERFORMANCE: "ๆ€ง่ƒฝๆต‹่ฏ•",
57
- TestType.SECURITY_TEST: "ๅฎ‰ๅ…จๆต‹่ฏ•",
 
 
 
 
 
 
 
 
 
 
 
 
58
  }
59
 
60
 
61
- def get_default_test_name(test_type: TestType) -> str:
62
  """Return the internal default test name for a given TestType.
63
 
64
  Names are hardcoded and not user-configurable.
65
  """
66
- return TEST_TYPE_DEFAULT_NAMES.get(test_type, test_type.value)
 
67
 
68
  class TestStatus(str, Enum):
69
  """Test status enumeration."""
@@ -81,10 +105,11 @@ class TestConfiguration(BaseModel):
81
  """Test configuration for parallel execution."""
82
 
83
  test_id: Optional[str] = None
84
- test_type: Optional[TestType] = TestType.WEB_BASIC_CHECK
85
  test_name: Optional[str] = ""
86
  enabled: Optional[bool] = True
87
  browser_config: Optional[Dict[str, Any]] = DEFAULT_CONFIG
 
88
  test_specific_config: Optional[Dict[str, Any]] = {}
89
  timeout: Optional[int] = 300 # seconds
90
  retry_count: Optional[int] = 0
@@ -284,15 +309,18 @@ class ParallelTestSession(BaseModel):
284
  """Convert session to dictionary with grouped test results."""
285
  grouped_results: Dict[str, Dict[str, Any]] = {}
286
 
 
 
 
287
  for cat in TestCategory:
288
  key = f"{cat.value}_test_results"
289
- grouped_results[key] = {"title": CATEGORY_TITLES.get(cat.value, cat.name), "items": []}
290
 
291
  for result in self.test_results.values():
292
  key = f"{result.category.value}_test_results"
293
  if key not in grouped_results:
294
  grouped_results[key] = {
295
- "title": CATEGORY_TITLES.get(result.category.value, result.category.name.title()),
296
  "items": [],
297
  }
298
  grouped_results[key]["items"].append(result.dict())
 
7
  from webqa_agent.browser.config import DEFAULT_CONFIG
8
 
9
  # ไพง่พนๆ ๆ ‡้ข˜๏ผˆ้ป˜่ฎค๏ผ‰
10
+ CATEGORY_TITLES: Dict[str, Dict[str, str]] = {
11
+ "zh-CN": {
12
+ "function": "ๅŠŸ่ƒฝๆต‹่ฏ•",
13
+ "ux": "UXๆต‹่ฏ•",
14
+ "performance": "ๆ€ง่ƒฝๆต‹่ฏ•",
15
+ "security": "ๅฎ‰ๅ…จๆต‹่ฏ•",
16
+ },
17
+ "en-US": {
18
+ "function": "Function Test",
19
+ "ux": "UX Test",
20
+ "performance": "Performance Test",
21
+ "security": "Security Test",
22
+ }
23
  }
24
 
25
+
26
  class TestCategory(str, Enum):
27
  FUNCTION = "function"
28
  UX = "ux"
 
34
  """Test type enumeration."""
35
 
36
  UNKNOWN = "unknown"
37
+ BASIC_TEST = "basic_test"
38
+ # BUTTON_TEST = "button_test"
39
  UI_AGENT_LANGGRAPH = "ui_agent_langgraph"
40
  UX_TEST = "ux_test"
41
  PERFORMANCE = "performance_test"
42
+ # WEB_BASIC_CHECK = "web_basic_check"
43
  SECURITY_TEST = "security_test"
44
  SEO_TEST = "seo_test"
45
 
 
47
  """Map TestType to TestCategory."""
48
  mapping = {
49
  TestType.UI_AGENT_LANGGRAPH: TestCategory.FUNCTION,
50
+ TestType.BASIC_TEST: TestCategory.FUNCTION,
51
+ # TestType.BUTTON_TEST: TestCategory.FUNCTION,
52
+ # TestType.WEB_BASIC_CHECK: TestCategory.FUNCTION,
53
  TestType.UX_TEST: TestCategory.UX,
54
  TestType.PERFORMANCE: TestCategory.PERFORMANCE,
55
  TestType.SECURITY_TEST: TestCategory.SECURITY,
 
59
 
60
 
61
  # ๆŠฅๅ‘Šๅญๆ ‡้ข˜ๆ 
62
+ TEST_TYPE_DEFAULT_NAMES: Dict[str, Dict[TestType, str]] = {
63
+ "zh-CN": {
64
+ TestType.UI_AGENT_LANGGRAPH: "ๆ™บ่ƒฝๅŠŸ่ƒฝๆต‹่ฏ•",
65
+ TestType.BASIC_TEST: "้ๅކๆต‹่ฏ•",
66
+ # TestType.BUTTON_TEST: "ๅŠŸ่ƒฝๆต‹่ฏ•",
67
+ # TestType.WEB_BASIC_CHECK: "ๆŠ€ๆœฏๅฅๅบทๅบฆๆฃ€ๆŸฅ",
68
+ TestType.UX_TEST: "็”จๆˆทไฝ“้ชŒๆต‹่ฏ•",
69
+ TestType.PERFORMANCE: "ๆ€ง่ƒฝๆต‹่ฏ•",
70
+ TestType.SECURITY_TEST: "ๅฎ‰ๅ…จๆต‹่ฏ•",
71
+ },
72
+ "en-US": {
73
+ TestType.UI_AGENT_LANGGRAPH: "AI Function Test",
74
+ TestType.BASIC_TEST: "Basic Function Test",
75
+ # TestType.BUTTON_TEST: "Traversal Test",
76
+ # TestType.WEB_BASIC_CHECK: "Technical Health Check",
77
+ TestType.UX_TEST: "UX Test",
78
+ TestType.PERFORMANCE: "Performance Test",
79
+ TestType.SECURITY_TEST: "Security Test",
80
+ }
81
  }
82
 
83
 
84
+ def get_default_test_name(test_type: TestType, language: str = "zh-CN") -> str:
85
  """Return the internal default test name for a given TestType.
86
 
87
  Names are hardcoded and not user-configurable.
88
  """
89
+ return TEST_TYPE_DEFAULT_NAMES.get(language, {}).get(test_type, test_type.value)
90
+
91
 
92
  class TestStatus(str, Enum):
93
  """Test status enumeration."""
 
105
  """Test configuration for parallel execution."""
106
 
107
  test_id: Optional[str] = None
108
+ test_type: Optional[TestType] = TestType.BASIC_TEST
109
  test_name: Optional[str] = ""
110
  enabled: Optional[bool] = True
111
  browser_config: Optional[Dict[str, Any]] = DEFAULT_CONFIG
112
+ report_config: Optional[Dict[str, Any]] = {"language": "zh-CN"}
113
  test_specific_config: Optional[Dict[str, Any]] = {}
114
  timeout: Optional[int] = 300 # seconds
115
  retry_count: Optional[int] = 0
 
309
  """Convert session to dictionary with grouped test results."""
310
  grouped_results: Dict[str, Dict[str, Any]] = {}
311
 
312
+ language = "zh-CN"  # default report language when no test configurations are present
+ if self.test_configurations and len(self.test_configurations) > 0:
313
+ language = self.test_configurations[0].report_config.get("language", "zh-CN")
314
+
315
  for cat in TestCategory:
316
  key = f"{cat.value}_test_results"
317
+ grouped_results[key] = {"title": CATEGORY_TITLES[language].get(cat.value, cat.name), "items": []}
318
 
319
  for result in self.test_results.values():
320
  key = f"{result.category.value}_test_results"
321
  if key not in grouped_results:
322
  grouped_results[key] = {
323
+ "title": CATEGORY_TITLES[language].get(result.category.value, result.category.name.title()),
324
  "items": [],
325
  }
326
  grouped_results[key]["items"].append(result.dict())
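A minimal, self-contained sketch of how the language-keyed lookups in this file resolve (tables abbreviated to a couple of entries each; the fallback behaviour mirrors `get_default_test_name` and the `CATEGORY_TITLES` access above):

```python
from enum import Enum
from typing import Dict


class TestType(str, Enum):
    BASIC_TEST = "basic_test"
    UX_TEST = "ux_test"


# Abbreviated copies of the lookup tables defined in test_structures.py.
CATEGORY_TITLES: Dict[str, Dict[str, str]] = {
    "zh-CN": {"function": "ๅŠŸ่ƒฝๆต‹่ฏ•", "ux": "UXๆต‹่ฏ•"},
    "en-US": {"function": "Function Test", "ux": "UX Test"},
}

TEST_TYPE_DEFAULT_NAMES: Dict[str, Dict[TestType, str]] = {
    "zh-CN": {TestType.BASIC_TEST: "้ๅކๆต‹่ฏ•"},
    "en-US": {TestType.BASIC_TEST: "Basic Function Test"},
}


def get_default_test_name(test_type: TestType, language: str = "zh-CN") -> str:
    # Unknown languages or missing test types fall back to the raw enum value.
    return TEST_TYPE_DEFAULT_NAMES.get(language, {}).get(test_type, test_type.value)


assert get_default_test_name(TestType.BASIC_TEST, "en-US") == "Basic Function Test"
assert get_default_test_name(TestType.UX_TEST, "en-US") == "ux_test"  # not in the abbreviated table
assert CATEGORY_TITLES["en-US"]["ux"] == "UX Test"
```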
webqa_agent/executor/__init__.py CHANGED
@@ -2,17 +2,16 @@ from .parallel_executor import ParallelTestExecutor
2
  from .parallel_mode import ParallelMode
3
  from .result_aggregator import ResultAggregator
4
  from .test_runners import (
5
- BaseTestRunner,
6
  LighthouseTestRunner,
7
  UIAgentLangGraphRunner,
8
- UXTestRunner,
9
- WebBasicCheckRunner,
10
  )
11
 
12
  __all__ = [
13
  "ParallelMode",
14
  "ParallelTestExecutor",
15
- "BaseTestRunner",
16
  "UIAgentLangGraphRunner",
17
  "UXTestRunner",
18
  "LighthouseTestRunner",
 
2
  from .parallel_mode import ParallelMode
3
  from .result_aggregator import ResultAggregator
4
  from .test_runners import (
5
+ BasicTestRunner,
6
  LighthouseTestRunner,
7
  UIAgentLangGraphRunner,
8
+ UXTestRunner
 
9
  )
10
 
11
  __all__ = [
12
  "ParallelMode",
13
  "ParallelTestExecutor",
14
+ "BasicTestRunner",
15
  "UIAgentLangGraphRunner",
16
  "UXTestRunner",
17
  "LighthouseTestRunner",
webqa_agent/executor/parallel_executor.py CHANGED
@@ -11,12 +11,11 @@ from webqa_agent.data import ParallelTestSession, TestConfiguration, TestResult,
11
  from webqa_agent.data.test_structures import get_category_for_test_type
12
  from webqa_agent.executor.result_aggregator import ResultAggregator
13
  from webqa_agent.executor.test_runners import (
14
- ButtonTestRunner,
15
  LighthouseTestRunner,
16
  SecurityTestRunner,
17
  UIAgentLangGraphRunner,
18
  UXTestRunner,
19
- WebBasicCheckRunner,
20
  )
21
  from webqa_agent.utils.log_icon import icon
22
 
@@ -27,15 +26,15 @@ class ParallelTestExecutor:
27
  def __init__(self, max_concurrent_tests: int = 4):
28
  self.max_concurrent_tests = max_concurrent_tests
29
  self.session_manager = BrowserSessionManager()
30
- self.result_aggregator = ResultAggregator()
31
 
32
  # Test runners mapping
33
  self.test_runners = {
34
  TestType.UI_AGENT_LANGGRAPH: UIAgentLangGraphRunner(),
35
  TestType.UX_TEST: UXTestRunner(),
36
  TestType.PERFORMANCE: LighthouseTestRunner(),
37
- TestType.WEB_BASIC_CHECK: WebBasicCheckRunner(),
38
- TestType.BUTTON_TEST: ButtonTestRunner(),
 
39
  TestType.SECURITY_TEST: SecurityTestRunner(),
40
  }
41
 
@@ -85,7 +84,12 @@ class ParallelTestExecutor:
85
 
86
  # Resolve dependencies and create execution order
87
  execution_batches = self._resolve_test_dependencies(enabled_tests)
88
-
 
 
 
 
 
89
  for batch_idx, test_batch in enumerate(execution_batches):
90
  logging.debug(f"Executing batch {batch_idx + 1}/{len(execution_batches)} with {len(test_batch)} tests")
91
 
@@ -174,8 +178,9 @@ class ParallelTestExecutor:
174
  if test_config.test_type in [
175
  TestType.UI_AGENT_LANGGRAPH,
176
  TestType.UX_TEST,
177
- TestType.BUTTON_TEST,
178
- TestType.WEB_BASIC_CHECK,
 
179
  ]:
180
 
181
  # Create isolated browser session
 
11
  from webqa_agent.data.test_structures import get_category_for_test_type
12
  from webqa_agent.executor.result_aggregator import ResultAggregator
13
  from webqa_agent.executor.test_runners import (
14
+ BasicTestRunner,
15
  LighthouseTestRunner,
16
  SecurityTestRunner,
17
  UIAgentLangGraphRunner,
18
  UXTestRunner,
 
19
  )
20
  from webqa_agent.utils.log_icon import icon
21
 
 
26
  def __init__(self, max_concurrent_tests: int = 4):
27
  self.max_concurrent_tests = max_concurrent_tests
28
  self.session_manager = BrowserSessionManager()
 
29
 
30
  # Test runners mapping
31
  self.test_runners = {
32
  TestType.UI_AGENT_LANGGRAPH: UIAgentLangGraphRunner(),
33
  TestType.UX_TEST: UXTestRunner(),
34
  TestType.PERFORMANCE: LighthouseTestRunner(),
35
+ TestType.BASIC_TEST: BasicTestRunner(),
36
+ # TestType.WEB_BASIC_CHECK: WebBasicCheckRunner(),
37
+ # TestType.BUTTON_TEST: ButtonTestRunner(),
38
  TestType.SECURITY_TEST: SecurityTestRunner(),
39
  }
40
 
 
84
 
85
  # Resolve dependencies and create execution order
86
  execution_batches = self._resolve_test_dependencies(enabled_tests)
87
+ # Get report_config from the first test configuration if available
88
+ report_config = None
89
+ if test_session.test_configurations:
90
+ report_config = test_session.test_configurations[0].report_config
91
+ self.result_aggregator = ResultAggregator(report_config)
92
+
93
  for batch_idx, test_batch in enumerate(execution_batches):
94
  logging.debug(f"Executing batch {batch_idx + 1}/{len(execution_batches)} with {len(test_batch)} tests")
95
 
 
178
  if test_config.test_type in [
179
  TestType.UI_AGENT_LANGGRAPH,
180
  TestType.UX_TEST,
181
+ TestType.BASIC_TEST
182
+ # TestType.BUTTON_TEST,
183
+ # TestType.WEB_BASIC_CHECK,
184
  ]:
185
 
186
  # Create isolated browser session
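A hedged sketch of the wiring introduced in this file: runners are dispatched from a `TestType`-keyed mapping, and the result aggregator is built from the `report_config` of the first configured test. The classes below are trimmed stand-ins, not the real runner implementations:

```python
import asyncio
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional


class TestType(str, Enum):
    BASIC_TEST = "basic_test"
    PERFORMANCE = "performance_test"


@dataclass
class TestConfiguration:  # trimmed stand-in for the Pydantic model
    test_type: TestType
    report_config: Dict[str, Any] = field(default_factory=lambda: {"language": "zh-CN"})


class BasicTestRunner:        # placeholder runner, not the real implementation
    async def run_test(self) -> str:
        return "basic ok"


class LighthouseTestRunner:   # placeholder runner, not the real implementation
    async def run_test(self) -> str:
        return "lighthouse ok"


test_runners = {
    TestType.BASIC_TEST: BasicTestRunner(),
    TestType.PERFORMANCE: LighthouseTestRunner(),
}


def pick_report_config(configs: List[TestConfiguration]) -> Optional[Dict[str, Any]]:
    # Mirrors the executor: the aggregator gets report_config from the first configuration, if any.
    return configs[0].report_config if configs else None


configs = [TestConfiguration(TestType.BASIC_TEST, {"language": "en-US"})]
print(pick_report_config(configs))                                  # {'language': 'en-US'}
print(asyncio.run(test_runners[configs[0].test_type].run_test()))   # basic ok
```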
webqa_agent/executor/parallel_mode.py CHANGED
@@ -24,7 +24,8 @@ class ParallelMode:
24
  llm_config: Dict[str, Any],
25
  browser_config: Optional[Dict[str, Any]] = None,
26
  test_configurations: Optional[List[Dict[str, Any]]] = None,
27
- log_cfg: Optional[Dict[str, Any]] = None
 
28
  ) -> Tuple[Dict[str, Any], str]:
29
  """Run tests in parallel mode with configurable test types.
30
 
@@ -34,6 +35,7 @@ class ParallelMode:
34
  browser_config: Default browser configuration
35
  test_configurations: Custom test configurations for parallel execution
36
  log_cfg: Configuration for logger
 
37
 
38
  Returns:
39
  Tuple of (aggregated_results, report_path)
@@ -41,7 +43,7 @@ class ParallelMode:
41
  try:
42
 
43
  GetLog.get_log(log_level=log_cfg["level"])
44
- Display.init()
45
  Display.display.start()
46
 
47
  logging.info(f"{icon['rocket']} Starting tests for URL: {url}, parallel mode {self.max_concurrent_tests}")
@@ -59,7 +61,7 @@ class ParallelMode:
59
 
60
  # Configure tests based on input or legacy test objects
61
  if test_configurations:
62
- self._configure_tests_from_config(test_session, test_configurations, browser_config)
63
 
64
  # Execute tests in parallel
65
  completed_session = await self.executor.execute_parallel_tests(test_session)
@@ -86,10 +88,11 @@ class ParallelMode:
86
  test_session: ParallelTestSession,
87
  test_configurations: List[Dict[str, Any]],
88
  default_browser_config: Dict[str, Any],
 
89
  ):
90
  """Configure tests from provided configuration."""
91
  for config in test_configurations:
92
- test_type_str = config.get("test_type", "web_basic_check")
93
 
94
  # Map string to TestType enum
95
  test_type = self._map_test_type(test_type_str)
@@ -100,9 +103,10 @@ class ParallelMode:
100
  test_config = TestConfiguration(
101
  test_id=str(uuid.uuid4()),
102
  test_type=test_type,
103
- test_name=get_default_test_name(test_type),
104
  enabled=config.get("enabled", True),
105
  browser_config=browser_config,
 
106
  test_specific_config=config.get("test_specific_config", {}),
107
  timeout=config.get("timeout", 300),
108
  retry_count=config.get("retry_count", 0),
@@ -117,10 +121,11 @@ class ParallelMode:
117
  "ui_agent_langgraph": TestType.UI_AGENT_LANGGRAPH,
118
  "ux_test": TestType.UX_TEST,
119
  "performance": TestType.PERFORMANCE,
120
- "web_basic_check": TestType.WEB_BASIC_CHECK,
121
- "button_test": TestType.BUTTON_TEST,
 
122
  "security": TestType.SECURITY_TEST,
123
  "security_test": TestType.SECURITY_TEST,
124
  }
125
 
126
- return mapping.get(test_type_str, TestType.WEB_BASIC_CHECK)
 
24
  llm_config: Dict[str, Any],
25
  browser_config: Optional[Dict[str, Any]] = None,
26
  test_configurations: Optional[List[Dict[str, Any]]] = None,
27
+ log_cfg: Optional[Dict[str, Any]] = None,
28
+ report_cfg: Optional[Dict[str, Any]] = None
29
  ) -> Tuple[Dict[str, Any], str]:
30
  """Run tests in parallel mode with configurable test types.
31
 
 
35
  browser_config: Default browser configuration
36
  test_configurations: Custom test configurations for parallel execution
37
  log_cfg: Configuration for logger
38
+ report_cfg: Configuration for report
39
 
40
  Returns:
41
  Tuple of (aggregated_results, report_path)
 
43
  try:
44
 
45
  GetLog.get_log(log_level=log_cfg["level"])
46
+ Display.init(language=report_cfg["language"])
47
  Display.display.start()
48
 
49
  logging.info(f"{icon['rocket']} Starting tests for URL: {url}, parallel mode {self.max_concurrent_tests}")
 
61
 
62
  # Configure tests based on input or legacy test objects
63
  if test_configurations:
64
+ self._configure_tests_from_config(test_session, test_configurations, browser_config, report_cfg)
65
 
66
  # Execute tests in parallel
67
  completed_session = await self.executor.execute_parallel_tests(test_session)
 
88
  test_session: ParallelTestSession,
89
  test_configurations: List[Dict[str, Any]],
90
  default_browser_config: Dict[str, Any],
91
+ report_cfg: Dict[str, Any]
92
  ):
93
  """Configure tests from provided configuration."""
94
  for config in test_configurations:
95
+ test_type_str = config.get("test_type", "basic_test")
96
 
97
  # Map string to TestType enum
98
  test_type = self._map_test_type(test_type_str)
 
103
  test_config = TestConfiguration(
104
  test_id=str(uuid.uuid4()),
105
  test_type=test_type,
106
+ test_name=get_default_test_name(test_type, report_cfg["language"]),
107
  enabled=config.get("enabled", True),
108
  browser_config=browser_config,
109
+ report_config=report_cfg,
110
  test_specific_config=config.get("test_specific_config", {}),
111
  timeout=config.get("timeout", 300),
112
  retry_count=config.get("retry_count", 0),
 
121
  "ui_agent_langgraph": TestType.UI_AGENT_LANGGRAPH,
122
  "ux_test": TestType.UX_TEST,
123
  "performance": TestType.PERFORMANCE,
124
+ "basic_test": TestType.BASIC_TEST,
125
+ # "web_basic_check": TestType.WEB_BASIC_CHECK,
126
+ # "button_test": TestType.BUTTON_TEST,
127
  "security": TestType.SECURITY_TEST,
128
  "security_test": TestType.SECURITY_TEST,
129
  }
130
 
131
+ return mapping.get(test_type_str, TestType.BASIC_TEST)
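To make the new default explicit, the mapping logic above restated as a self-contained snippet (enum trimmed to the members used in this file): unrecognized strings now resolve to `BASIC_TEST` rather than the removed `WEB_BASIC_CHECK`.

```python
from enum import Enum


class TestType(str, Enum):
    UI_AGENT_LANGGRAPH = "ui_agent_langgraph"
    UX_TEST = "ux_test"
    PERFORMANCE = "performance_test"
    BASIC_TEST = "basic_test"
    SECURITY_TEST = "security_test"


def map_test_type(test_type_str: str) -> TestType:
    """Mirror of _map_test_type after this change."""
    mapping = {
        "ui_agent_langgraph": TestType.UI_AGENT_LANGGRAPH,
        "ux_test": TestType.UX_TEST,
        "performance": TestType.PERFORMANCE,
        "basic_test": TestType.BASIC_TEST,
        "security": TestType.SECURITY_TEST,
        "security_test": TestType.SECURITY_TEST,
    }
    # Unknown strings fall back to the traversal-style basic test.
    return mapping.get(test_type_str, TestType.BASIC_TEST)


assert map_test_type("performance") is TestType.PERFORMANCE
assert map_test_type("something_unknown") is TestType.BASIC_TEST
```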
webqa_agent/executor/result_aggregator.py CHANGED
@@ -6,11 +6,27 @@ from typing import Any, Dict, List, Optional
6
 
7
  from webqa_agent.data import ParallelTestSession, TestStatus
8
  from webqa_agent.llm.llm_api import LLMAPI
9
-
10
 
11
  class ResultAggregator:
12
  """Aggregates and analyzes parallel test results"""
13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  async def aggregate_results(self, test_session: ParallelTestSession) -> Dict[str, Any]:
15
  """Aggregate all test results into a comprehensive summary.
16
 
@@ -51,20 +67,20 @@ class ResultAggregator:
51
  executive_content = {
52
  "executiveSummary": "",
53
  "statistics": [
54
- {"label": "่ฏ„ไผฐ็ฑปๅˆซ", "value": str(total_sub_tests), "colorClass": "var(--warning-color)"},
55
- {"label": "้€š่ฟ‡ๆ•ฐ", "value": str(passed_sub_tests), "colorClass": "var(--success-color)"},
56
- {"label": "ๅคฑ่ดฅๆ•ฐ", "value": str(critical_sub_tests), "colorClass": "var(--failure-color)"},
57
  ]
58
  }
59
 
60
  aggregated_results_list = [
61
- {"id": "subtab-summary-advice", "title": "ๆ‘˜่ฆไธŽๅปบ่ฎฎ", "content": executive_content},
62
  {
63
  "id": "subtab-issue-tracker",
64
- "title": "้—ฎ้ข˜ๅˆ—่กจ",
65
  "content": {
66
- "title": "้—ฎ้ข˜่ฟฝ่ธชๅˆ—่กจ",
67
- "note": "ๆณจ๏ผšๆญคๅˆ—่กจๆฑ‡ๆ€ปไบ†ๆ‰€ๆœ‰ๆฃ€ๆต‹ๅˆฐ็š„โ€œๅคฑ่ดฅโ€ๅ’Œโ€œ่ญฆๅ‘Šโ€้กน",
68
  "issues": issues,
69
  },
70
  },
@@ -89,7 +105,7 @@ class ResultAggregator:
89
 
90
  # Also expose simple counters at the top-level for easy consumption
91
  return {
92
- "title": "่ฏ„ไผฐๆ€ป่งˆ",
93
  "tabs": aggregated_results_list,
94
  "count":{
95
  "total": total_sub_tests,
@@ -117,7 +133,6 @@ class ResultAggregator:
117
  logging.error(f"Failed to initialise LLM, falling back to heuristic issue extraction: {e}")
118
  use_llm = False
119
 
120
- logging.debug(f"LLM ๆ€ป็ป“ๆต‹่ฏ•็ป“ๆžœไธญ...")
121
  # Iterate over all tests and their sub-tests
122
  for test_result in test_session.test_results.values():
123
  for sub in test_result.sub_tests or []:
@@ -133,7 +148,7 @@ class ResultAggregator:
133
  severity_level = "medium"
134
 
135
  issue_entry = {
136
- "issue_name": "ๆต‹่ฏ•ไธ้€š่ฟ‡: "+test_result.test_name,
137
  "issue_type": test_result.test_type.value,
138
  "sub_test_name": sub.name,
139
  "severity": severity_level,
@@ -147,14 +162,8 @@ class ResultAggregator:
147
  "final_summary": sub.final_summary,
148
  }
149
  prompt = (
150
- "ไฝ ๆ˜ฏไธ€ๅ็ป้ชŒไธฐๅฏŒ็š„่ฝฏไปถๆต‹่ฏ•ๅˆ†ๆžๅธˆใ€‚่ฏท้˜…่ฏปไปฅไธ‹ๅญๆต‹่ฏ•ไฟกๆฏ๏ผŒๆๅ–ใ€้—ฎ้ข˜ๅ†…ๅฎนใ€‘ใ€ใ€้—ฎ้ข˜ๆ•ฐ้‡ใ€‘ๅ’Œใ€ไธฅ้‡็จ‹ๅบฆใ€‘๏ผš\n"
151
- "1๏ผ‰ๅฆ‚ๆžœ status = pass๏ผŒ่ฏท่ฟ”ๅ›ž JSON {\"issue_count\": 0}ใ€‚\n"
152
- "2๏ผ‰ๅฆ‚ๆžœ status != pass๏ผŒๅˆ™ๆ นๆฎ reportใ€metrics ๆˆ– final_summary ็š„ๅ…ทไฝ“ๅ†…ๅฎนๅˆคๆ–ญ๏ผš\n"
153
- " - ๆๅ–ๆœ€ๅ…ณ้”ฎ็š„ไธ€ๅฅ่ฏ้—ฎ้ข˜ๆ่ฟฐ issues\n"
154
- " - ็ปŸ่ฎก้—ฎ้ข˜ๆ•ฐ้‡ issue_count๏ผˆๅฆ‚ๆžœๆ— ๆณ•ๅ‡†็กฎ็ปŸ่ฎก๏ผŒๅฏ้ป˜่ฎคไธบ 1๏ผ‰\n"
155
- " - ไธฅ้‡็จ‹ๅบฆๅˆคๆ–ญ๏ผšไผ˜ๅ…ˆๆŸฅ็œ‹ report ไธญๆ˜ฏๅฆๅทฒๆ ‡ๆ˜Žไธฅ้‡็จ‹ๅบฆ๏ผˆๅฆ‚ high/medium/lowใ€ไธฅ้‡/ไธญ็ญ‰/่ฝปๅพฎใ€critical/major/minor ๏ฟฝ๏ฟฝ๏ผ‰๏ผŒๅฆ‚ๆžœๆœ‰ๅˆ™็›ดๆŽฅ้ตๅพช๏ผ›ๅฆ‚ๆžœ report ไธญๆฒกๆœ‰ๆ˜Ž็กฎๆ ‡ๆ˜Ž๏ผŒๅˆ™ๆ นๆฎ้—ฎ้ข˜ๅฝฑๅ“็จ‹ๅบฆ่‡ช่กŒๅˆคๆ–ญ๏ผšhigh๏ผˆไธฅ้‡ๅฝฑๅ“ๅŠŸ่ƒฝ/ๆ€ง่ƒฝ๏ผ‰ใ€medium๏ผˆไธญ็ญ‰ๅฝฑๅ“๏ผ‰ใ€low๏ผˆ่ฝปๅพฎ้—ฎ้ข˜/่ญฆๅ‘Š๏ผ‰\n"
156
- "3๏ผ‰ไฝ ไธ่ƒฝ่พ“ๅ‡บไปปไฝ•ๅ…ถไป–ๅ†…ๅฎน๏ผŒไนŸไธ่ƒฝ่พ“ๅ‡บไปฃ็ ๅ—๏ผŒๅช่ƒฝ่พ“ๅ‡บ็ปŸไธ€ไธบ JSON๏ผš{\"issue_count\": <ๆ•ฐๅญ—>, \"issues\": \"ไธ€ๅฅ่ฏไธญๆ–‡้—ฎ้ข˜ๆ่ฟฐ\", \"severity\": \"high|medium|low\"}ใ€‚\n"
157
- f"ๅญๆต‹่ฏ•ไฟกๆฏ: {json.dumps(prompt_content, ensure_ascii=False, default=str)}"
158
  )
159
  logging.debug(f"LLM Issue Prompt: {prompt}")
160
  llm_response_raw = await llm.get_llm_response("", prompt)
@@ -202,67 +211,6 @@ class ResultAggregator:
202
  logging.warning(f"Failed to close LLM client: {e}")
203
  return critical_issues
204
 
205
- async def generate_llm_summary(self, aggregated_results: Dict[str, Any], llm_config: Dict[str, Any]) -> str:
206
- """Generate LLM-powered summary and analysis."""
207
- try:
208
- llm = LLMAPI(llm_config)
209
-
210
- # Create comprehensive prompt
211
- prompt = self._create_analysis_prompt(aggregated_results)
212
-
213
- # Get LLM analysis
214
- await llm.initialize() # ็กฎไฟLLMๅทฒๅˆๅง‹ๅŒ–
215
- summary = await llm.get_llm_response("", prompt)
216
-
217
- return summary
218
-
219
- except Exception as e:
220
- logging.error(f"Failed to generate LLM summary: {e}")
221
- return f"LLM summary generation failed: {str(e)}"
222
-
223
- def _create_analysis_prompt(self, aggregated_results: Dict[str, Any]) -> str:
224
- """Create analysis prompt for LLM."""
225
- prompt = f"""
226
- ่ฏทๅŸบไบŽไปฅไธ‹ๅนถ่กŒๆต‹่ฏ•็ป“ๆžœ่ฟ›่กŒ็ปผๅˆๅˆ†ๆž๏ผŒ็”Ÿๆˆไธ“ไธš็š„ๆต‹่ฏ•ๆŠฅๅ‘Šๆ€ป็ป“๏ผš
227
-
228
- ## ๆต‹่ฏ•ไผš่ฏๆฆ‚่งˆ
229
- {json.dumps(aggregated_results.get('session_summary', {}), indent=2, ensure_ascii=False)}
230
-
231
- ## ๆ•ดไฝ“ๆŒ‡ๆ ‡
232
- {json.dumps(aggregated_results.get('overall_metrics', {}), indent=2, ensure_ascii=False)}
233
-
234
- ## ๆ€ง่ƒฝๅˆ†ๆž
235
- {json.dumps(aggregated_results.get('lighthouse_summary', {}), indent=2, ensure_ascii=False)}
236
-
237
- ## ็”จๆˆทไฝ“้ชŒๅˆ†ๆž
238
- {json.dumps(aggregated_results.get('ux_analysis', {}), indent=2, ensure_ascii=False)}
239
-
240
- ## ๆŠ€ๆœฏๅฅๅบทๅบฆ
241
- {json.dumps(aggregated_results.get('technical_health', {}), indent=2, ensure_ascii=False)}
242
-
243
- ## ๅŠŸ่ƒฝๅˆ†ๆž
244
- {json.dumps(aggregated_results.get('ui_functionality', {}), indent=2, ensure_ascii=False)}
245
-
246
- ## ๅ…ณ้”ฎ้—ฎ้ข˜
247
- {json.dumps(aggregated_results.get('critical_issues', []), indent=2, ensure_ascii=False)}
248
-
249
- ่ฏทๆไพ›๏ผš
250
- 1. ๆ‰ง่กŒๆ€ป็ป“
251
- 2. ๅ…ณ้”ฎๅ‘็Žฐ
252
- 3. ้ฃŽ้™ฉ่ฏ„ไผฐ
253
- 4. ๆ”น่ฟ›ๅปบ่ฎฎ
254
- 5. ไธ‹ไธ€ๆญฅ่กŒๅŠจ่ฎกๅˆ’
255
-
256
- ่ฆๆฑ‚๏ผš
257
- - ไฝฟ็”จไธ“ไธšไธ”ๆ˜“ๆ‡‚็š„่ฏญ่จ€
258
- - ็ชๅ‡บ้‡่ฆ้—ฎ้ข˜ๅ’ŒๆˆๅŠŸไบฎ็‚น
259
- - ๆไพ›ๅ…ทไฝ“ๅฏ่กŒ็š„ๅปบ่ฎฎ
260
- - ๅŒ…ๅซ้ฃŽ้™ฉ็ญ‰็บง่ฏ„ไผฐ
261
- """
262
- logging.debug(f"Analysis Prompt: {prompt}")
263
-
264
- return prompt
265
-
266
  async def _get_error_message(self, test_session: ParallelTestSession) -> str:
267
  """Get error message from test session."""
268
  error_message = []
@@ -271,7 +219,7 @@ class ResultAggregator:
271
  # Only append if error_message is not empty
272
  if test_result.error_message:
273
  error_message.append({
274
- "issue_name": "ๆ‰ง่กŒๅผ‚ๅธธ: "+test_result.test_name,
275
  "issue_type": test_result.test_type.value,
276
  "severity": "high",
277
  "issues": test_result.error_message
@@ -327,11 +275,23 @@ class ResultAggregator:
327
  return ""
328
 
329
  def _read_js_content(self) -> str:
330
- """Read and return JavaScript content."""
331
  try:
332
- js_path = self._get_static_dir() / "assets" / "index.js"
 
 
 
 
 
 
333
  if js_path.exists():
334
  return js_path.read_text(encoding="utf-8")
 
 
 
 
 
 
335
  except Exception as e:
336
  logging.warning(f"Failed to read JS file: {e}")
337
  return ""
 
6
 
7
  from webqa_agent.data import ParallelTestSession, TestStatus
8
  from webqa_agent.llm.llm_api import LLMAPI
9
+ from webqa_agent.utils import i18n
10
 
11
  class ResultAggregator:
12
  """Aggregates and analyzes parallel test results"""
13
 
14
+ def __init__(self, report_config: dict = None):
15
+ """Initialize ResultAggregator with language support.
16
+
17
+ Args:
18
+ report_config: Configuration dictionary containing language settings
19
+ """
20
+ self.language = report_config.get("language", "zh-CN") if report_config else "zh-CN"
21
+ self.localized_strings = {
22
+ 'zh-CN': i18n.get_lang_data('zh-CN').get('aggregator', {}),
23
+ 'en-US': i18n.get_lang_data('en-US').get('aggregator', {}),
24
+ }
25
+
26
+ def _get_text(self, key: str) -> str:
27
+ """Get localized text for the given key."""
28
+ return self.localized_strings.get(self.language, {}).get(key, key)
29
+
30
  async def aggregate_results(self, test_session: ParallelTestSession) -> Dict[str, Any]:
31
  """Aggregate all test results into a comprehensive summary.
32
 
 
67
  executive_content = {
68
  "executiveSummary": "",
69
  "statistics": [
70
+ {"label": self._get_text('assessment_categories'), "value": str(total_sub_tests), "colorClass": "var(--warning-color)"},
71
+ {"label": self._get_text('passed_count'), "value": str(passed_sub_tests), "colorClass": "var(--success-color)"},
72
+ {"label": self._get_text('failed_count'), "value": str(critical_sub_tests), "colorClass": "var(--failure-color)"},
73
  ]
74
  }
75
 
76
  aggregated_results_list = [
77
+ {"id": "subtab-summary-advice", "title": self._get_text('summary_and_advice'), "content": executive_content},
78
  {
79
  "id": "subtab-issue-tracker",
80
+ "title": self._get_text('issue_list'),
81
  "content": {
82
+ "title": self._get_text('issue_tracker_list'),
83
+ "note": self._get_text('issue_list_note'),
84
  "issues": issues,
85
  },
86
  },
 
105
 
106
  # Also expose simple counters at the top-level for easy consumption
107
  return {
108
+ "title": self._get_text('assessment_overview'),
109
  "tabs": aggregated_results_list,
110
  "count":{
111
  "total": total_sub_tests,
 
133
  logging.error(f"Failed to initialise LLM, falling back to heuristic issue extraction: {e}")
134
  use_llm = False
135
 
 
136
  # Iterate over all tests and their sub-tests
137
  for test_result in test_session.test_results.values():
138
  for sub in test_result.sub_tests or []:
 
148
  severity_level = "medium"
149
 
150
  issue_entry = {
151
+ "issue_name": self._get_text('test_failed_prefix') + test_result.test_name,
152
  "issue_type": test_result.test_type.value,
153
  "sub_test_name": sub.name,
154
  "severity": severity_level,
 
162
  "final_summary": sub.final_summary,
163
  }
164
  prompt = (
165
+ f"{self._get_text('llm_prompt_main')}\n\n"
166
+ f"{self._get_text('llm_prompt_test_info')}{json.dumps(prompt_content, ensure_ascii=False, default=str)}"
 
 
 
 
 
 
167
  )
168
  logging.debug(f"LLM Issue Prompt: {prompt}")
169
  llm_response_raw = await llm.get_llm_response("", prompt)
 
211
  logging.warning(f"Failed to close LLM client: {e}")
212
  return critical_issues
213
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
  async def _get_error_message(self, test_session: ParallelTestSession) -> str:
215
  """Get error message from test session."""
216
  error_message = []
 
219
  # Only append if error_message is not empty
220
  if test_result.error_message:
221
  error_message.append({
222
+ "issue_name": self._get_text('execution_error_prefix') + test_result.test_name,
223
  "issue_type": test_result.test_type.value,
224
  "severity": "high",
225
  "issues": test_result.error_message
 
275
  return ""
276
 
277
  def _read_js_content(self) -> str:
278
+ """Read and return JavaScript content based on language."""
279
  try:
280
+ # Choose JS file based on language
281
+ if self.language == "en-US":
282
+ js_filename = "index_en-US.js"
283
+ else:
284
+ js_filename = "index.js" # Default to Chinese version
285
+
286
+ js_path = self._get_static_dir() / "assets" / js_filename
287
  if js_path.exists():
288
  return js_path.read_text(encoding="utf-8")
289
+ else:
290
+ # Fallback to default file if language-specific file doesn't exist
291
+ fallback_path = self._get_static_dir() / "assets" / "index.js"
292
+ if fallback_path.exists():
293
+ logging.warning(f"Language-specific JS file {js_filename} not found, using fallback")
294
+ return fallback_path.read_text(encoding="utf-8")
295
  except Exception as e:
296
  logging.warning(f"Failed to read JS file: {e}")
297
  return ""
webqa_agent/executor/test_runners.py CHANGED
@@ -16,6 +16,7 @@ from webqa_agent.testers import (LighthouseMetricsTest, PageButtonTest,
16
  WebAccessibilityTest)
17
  from webqa_agent.utils import Display
18
  from webqa_agent.utils.log_icon import icon
 
19
 
20
 
21
  class BaseTestRunner(ABC):
@@ -69,6 +70,7 @@ class UIAgentLangGraphRunner(BaseTestRunner):
69
  'remaining_objectives': business_objectives,
70
  'ui_tester_instance': parallel_tester,
71
  'current_test_case_index': 0,
 
72
  }
73
 
74
  graph_config = {'configurable': {'ui_tester_instance': parallel_tester}, 'recursion_limit': 100}
@@ -242,11 +244,11 @@ class UXTestRunner(BaseTestRunner):
242
  logging.info(f"{icon['running']} Running UX test: {test_config.test_name}")
243
  page = session.get_page()
244
 
245
- text_test = PageTextTest(llm_config)
246
  text_result: SubTestResult = await text_test.run(page=page)
247
 
248
  # Run ParallelPageContentTest
249
- content_test = PageContentTest(llm_config)
250
  content_results: List[SubTestResult] = await content_test.run(page=page)
251
 
252
  result.sub_tests = content_results + [text_result]
@@ -309,7 +311,7 @@ class LighthouseTestRunner(BaseTestRunner):
309
  return result
310
 
311
  # Run Lighthouse test
312
- lighthouse_test = LighthouseMetricsTest()
313
  lighthouse_results: SubTestResult = await lighthouse_test.run(target_url, browser_config=browser_config)
314
 
315
  result.sub_tests = [lighthouse_results]
@@ -326,13 +328,13 @@ class LighthouseTestRunner(BaseTestRunner):
326
  return result
327
 
328
 
329
- class ButtonTestRunner(BaseTestRunner):
330
- """Runner dedicated to button click tests."""
331
 
332
  async def run_test(
333
  self, session: BrowserSession, test_config: TestConfiguration, llm_config: Dict[str, Any], target_url: str
334
  ) -> TestResult:
335
- """Run Button test."""
336
 
337
  with Display.display(test_config.test_name):
338
  result = TestResult(
@@ -359,66 +361,43 @@ class ButtonTestRunner(BaseTestRunner):
359
  clickable_elements = dict(islice(clickable_elements.items(), 50))
360
  logging.warning(f'Clickable elements number is too large, only keep the first 50')
361
 
362
- button_test = PageButtonTest()
363
  button_test_result = await button_test.run(
364
  target_url, page=page, clickable_elements=clickable_elements, browser_config=browser_config
365
  )
366
 
367
- # Second subtest: each clickable result? keep detailed reports if needed; here we only include traverse test
368
- result.sub_tests = [button_test_result]
369
-
370
- # Overall metrics/status
371
- result.status = button_test_result.status
372
-
373
- logging.info(f"{icon['check']} Test completed: {test_config.test_name}")
374
-
375
- except Exception as e:
376
- error_msg = f'Button test failed: {str(e)}'
377
- result.status = TestStatus.FAILED
378
- result.error_message = error_msg
379
- logging.error(error_msg)
380
- raise
381
-
382
- return result
383
-
384
 
385
- class WebBasicCheckRunner(BaseTestRunner):
386
- """Runner for Web Basic Check tests."""
387
 
388
- async def run_test(
389
- self, session: BrowserSession, test_config: TestConfiguration, llm_config: Dict[str, Any], target_url: str
390
- ) -> TestResult:
391
- """Run Web Basic Check tests."""
392
 
393
- with Display.display(test_config.test_name):
394
- result = TestResult(
395
- test_id=test_config.test_id,
396
- test_type=test_config.test_type,
397
- test_name=test_config.test_name,
398
- status=TestStatus.RUNNING,
399
- category=get_category_for_test_type(test_config.test_type),
400
- )
401
 
402
- try:
403
- logging.info(f"{icon['running']} Running test: {test_config.test_name}")
404
- page = session.get_page()
 
 
405
 
406
- # Discover page elements
407
- from webqa_agent.crawler.crawl import CrawlHandler
 
408
 
409
- crawler = CrawlHandler(target_url)
410
- links = await crawler.extract_links(page)
411
- logging.info(f'Crawled {len(links)} links')
412
- # WebAccessibilityTest
413
- accessibility_test = WebAccessibilityTest()
414
- accessibility_result = await accessibility_test.run(target_url, links)
415
 
416
- result.sub_tests = [accessibility_result]
417
- result.status = accessibility_result.status
418
  logging.info(f"{icon['check']} Test completed: {test_config.test_name}")
419
 
420
  except Exception as e:
421
- error_msg = f'Web Basic Check test failed: {str(e)}'
422
  result.status = TestStatus.FAILED
423
  result.error_message = error_msg
424
  logging.error(error_msg)
@@ -426,37 +405,156 @@ class WebBasicCheckRunner(BaseTestRunner):
426
 
427
  return result
428
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
429
 
430
  class SecurityTestRunner(BaseTestRunner):
431
  """Runner for Security tests using Nuclei-based scanning."""
432
 
433
- # ๅธธ่ง็ฝ‘็ปœๆ‰ซๆๆ ‡็ญพ้…็ฝฎ
434
- SCAN_TAGS = {
435
- 'cve': 'ๅทฒ็ŸฅCVEๆผๆดžๆ‰ซๆ',
436
- 'xss': '่ทจ็ซ™่„šๆœฌๆ”ปๅ‡ปๆฃ€ๆต‹',
437
- 'sqli': 'SQLๆณจๅ…ฅๆฃ€ๆต‹',
438
- 'rce': '่ฟœ็จ‹ไปฃ็ ๆ‰ง่กŒๆฃ€ๆต‹',
439
- 'lfi': 'ๆœฌๅœฐๆ–‡ไปถๅŒ…ๅซๆฃ€ๆต‹',
440
- 'ssrf': 'ๆœๅŠก็ซฏ่ฏทๆฑ‚ไผช้€ ๆฃ€ๆต‹',
441
- 'redirect': 'ๅผ€ๆ”พ้‡ๅฎšๅ‘ๆฃ€ๆต‹',
442
- 'exposure': 'ๆ•ๆ„Ÿไฟกๆฏๆณ„้œฒๆฃ€ๆต‹',
443
- 'config': '้…็ฝฎ้”™่ฏฏๆฃ€ๆต‹',
444
- 'default-login': '้ป˜่ฎคๅ‡ญๆฎๆฃ€ๆต‹',
445
- 'ssl': 'SSL/TLS้…็ฝฎๆฃ€ๆต‹',
446
- 'dns': 'DNS็›ธๅ…ณๆฃ€ๆต‹',
447
- 'subdomain-takeover': 'ๅญๅŸŸๅๆŽฅ็ฎกๆฃ€ๆต‹',
448
- 'tech': 'ๆŠ€ๆœฏๆ ˆ่ฏ†ๅˆซ',
449
- 'panel': '็ฎก็†้ขๆฟๆฃ€ๆต‹',
450
- }
451
-
452
- # ๅ่ฎฎ็ฑปๅž‹ๆ‰ซๆ
453
- PROTOCOL_SCANS = {'http': 'HTTPๅ่ฎฎๆ‰ซๆ', 'dns': 'DNSๅ่ฎฎๆ‰ซๆ', 'tcp': 'TCPๅ่ฎฎๆ‰ซๆ', 'ssl': 'SSLๅ่ฎฎๆ‰ซๆ'}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
454
 
455
  async def run_test(
456
  self, session: BrowserSession, test_config: TestConfiguration, llm_config: Dict[str, Any], target_url: str
457
  ) -> TestResult:
458
  """Run Security tests using Nuclei scanning."""
459
 
 
460
  with Display.display(test_config.test_name):
461
  result = TestResult(
462
  test_id=test_config.test_id,
@@ -475,7 +573,7 @@ class SecurityTestRunner(BaseTestRunner):
475
 
476
  if not nuclei_available:
477
  result.status = TestStatus.FAILED
478
- result.error_message = 'Nuclei tool not found. Please install nuclei: go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest'
479
  return result
480
 
481
  # Run the security scan
@@ -514,33 +612,38 @@ class SecurityTestRunner(BaseTestRunner):
514
 
515
  # Build the report content
516
  if count == 0:
517
- issues_text = f'ๆœชๅ‘็Žฐ{severity.upper()}็บงๅˆซๅฎ‰ๅ…จ้—ฎ้ข˜'
518
  else:
519
  # ๅ–ๅ‰3ไธช้—ฎ้ข˜็š„ๅ็งฐไฝœไธบ็คบไพ‹
520
  sample_issues = [f['name'] for f in severity_findings[:3]]
521
- issues_text = f'ๅ‘็Žฐ{count}ไธช{severity.upper()}็บงๅˆซๅฎ‰ๅ…จ้—ฎ้ข˜'
522
  if sample_issues:
523
- issues_text += f"๏ผš{', '.join(sample_issues)}"
524
  if count > 3:
525
- issues_text += f' ็ญ‰{count}ไธช้—ฎ้ข˜'
526
 
527
  sub_tests.append(
528
  SubTestResult(
529
- name=f'{severity.upper()}็บงๅˆซๅฎ‰ๅ…จ้—ฎ้ข˜ๆ‰ซๆ',
530
  status=TestStatus.PASSED,
531
  metrics={'findings_count': count},
532
- report=[SubTestReport(title=f'{severity.upper()}็บงๅˆซๅฎ‰ๅ…จๆผๆดžๆ‰ซๆ', issues=issues_text)],
 
 
 
533
  )
534
  )
535
 
536
  # Create sub-tests per scan type
537
- for scan_type, description in {**self.SCAN_TAGS, **self.PROTOCOL_SCANS}.items():
 
 
538
  type_findings = [f for f in finding_details if scan_type in f.get('template_id', '').lower()]
539
  type_count = len(type_findings)
540
 
541
  # ๆž„ๅปบๆ‰ซๆ็ฑปๅž‹ๆŠฅๅ‘Šๅ†…ๅฎน
542
  if type_count == 0:
543
- issues_text = f'{description}๏ผšๆœชๅ‘็Žฐ็›ธๅ…ณๅฎ‰ๅ…จ้—ฎ้ข˜'
544
  else:
545
  # ๆŒ‰ไธฅ้‡็จ‹ๅบฆ็ปŸ่ฎก่ฏฅ็ฑปๅž‹็š„ๅ‘็Žฐ
546
  type_severity_counts = {}
@@ -551,24 +654,27 @@ class SecurityTestRunner(BaseTestRunner):
551
  severity_summary = []
552
  for sev in ['critical', 'high', 'medium', 'low', 'info']:
553
  if type_severity_counts.get(sev, 0) > 0:
554
- severity_summary.append(f'{sev.upper()}็บง{type_severity_counts[sev]}ไธช')
555
 
556
- issues_text = f'{description}๏ผšๅ‘็Žฐ{type_count}ไธช้—ฎ้ข˜'
557
  if severity_summary:
558
- issues_text += f"๏ผˆ{', '.join(severity_summary)}๏ผ‰"
559
 
560
  # Add concrete issue examples (up to 3)
561
  if type_findings:
562
  sample_names = [f['name'] for f in type_findings[:2]]
563
  if sample_names:
564
- issues_text += f"๏ผŒๅŒ…ๆ‹ฌ๏ผš{', '.join(sample_names)}"
565
  if type_count > 2:
566
- issues_text += ' ็ญ‰'
567
 
568
  combined_reports = []
569
  if not finding_details:
570
  # No security issues found
571
- combined_reports.append(SubTestReport(title='ๅฎ‰ๅ…จๆฃ€ๆŸฅ', issues='ๆ— ๅ‘็Žฐ้—ฎ้ข˜'))
 
 
 
572
  else:
573
  for fd in finding_details:
574
  title = f"[{fd.get('severity', 'unknown').upper()}] {fd.get('name')}"
@@ -576,15 +682,15 @@ class SecurityTestRunner(BaseTestRunner):
576
  if fd.get('description'):
577
  details_parts.append(fd['description'])
578
  if fd.get('matched_at'):
579
- details_parts.append(f"Matched at: {fd['matched_at']}")
580
  if fd.get('extracted_results'):
581
- details_parts.append(f"Extracted: {', '.join(map(str, fd['extracted_results']))}")
582
- issues_text = ' | '.join(details_parts) if details_parts else 'No further details.'
583
  combined_reports.append(SubTestReport(title=title, issues=issues_text))
584
 
585
  sub_tests = [
586
  SubTestResult(
587
- name='nucleiๆฃ€ๆŸฅ',
588
  status=TestStatus.PASSED,
589
  metrics={
590
  'total_findings': len(finding_details),
@@ -655,14 +761,18 @@ class SecurityTestRunner(BaseTestRunner):
655
  temp_dir.mkdir(parents=True, exist_ok=True)
656
 
657
  # Configure the scan tasks
658
- scan_configs = {'tag': self.SCAN_TAGS, 'protocol': self.PROTOCOL_SCANS}
659
 
660
  # ไปŽๆต‹่ฏ•้…็ฝฎไธญ่Žทๅ–่‡ชๅฎšไน‰ๅ‚ๆ•ฐ
661
  custom_config = test_config.test_specific_config or {}
662
  include_severity_scans = custom_config.get('include_severity_scans', True)
663
 
664
  if include_severity_scans:
665
- scan_configs['severity'] = {'critical': 'ไธฅ้‡ๆผๆดžๆ‰ซๆ', 'high': '้ซ˜ๅฑๆผๆดžๆ‰ซๆ', 'medium': 'ไธญๅฑๆผๆดžๆ‰ซๆ'}
 
 
 
 
666
 
667
  # ๆ‰ง่กŒๅนถ่กŒๆ‰ซๆ
668
  scan_results = await self._execute_scan_batch(target_url, scan_configs, temp_dir)
@@ -775,4 +885,4 @@ class SecurityTestRunner(BaseTestRunner):
775
  shutil.rmtree(temp_dir)
776
  logging.debug(f'Cleaned up temporary security scan files: {temp_path}')
777
  except Exception as e:
778
- logging.warning(f'Failed to cleanup temporary files at {temp_path}: {e}')
 
16
  WebAccessibilityTest)
17
  from webqa_agent.utils import Display
18
  from webqa_agent.utils.log_icon import icon
19
+ from webqa_agent.utils import i18n
20
 
21
 
22
  class BaseTestRunner(ABC):
 
70
  'remaining_objectives': business_objectives,
71
  'ui_tester_instance': parallel_tester,
72
  'current_test_case_index': 0,
73
+ 'language': test_config.report_config.get('language', 'zh-CN'),
74
  }
75
 
76
  graph_config = {'configurable': {'ui_tester_instance': parallel_tester}, 'recursion_limit': 100}
 
244
  logging.info(f"{icon['running']} Running UX test: {test_config.test_name}")
245
  page = session.get_page()
246
 
247
+ text_test = PageTextTest(llm_config, report_config=test_config.report_config)
248
  text_result: SubTestResult = await text_test.run(page=page)
249
 
250
  # Run ParallelPageContentTest
251
+ content_test = PageContentTest(llm_config, report_config=test_config.report_config)
252
  content_results: List[SubTestResult] = await content_test.run(page=page)
253
 
254
  result.sub_tests = content_results + [text_result]
 
311
  return result
312
 
313
  # Run Lighthouse test
314
+ lighthouse_test = LighthouseMetricsTest(report_config=test_config.report_config)
315
  lighthouse_results: SubTestResult = await lighthouse_test.run(target_url, browser_config=browser_config)
316
 
317
  result.sub_tests = [lighthouse_results]
 
328
  return result
329
 
330
 
331
+ class BasicTestRunner(BaseTestRunner):
332
+ """Runner for Traversal tests."""
333
 
334
  async def run_test(
335
  self, session: BrowserSession, test_config: TestConfiguration, llm_config: Dict[str, Any], target_url: str
336
  ) -> TestResult:
337
+ """Run UX tests with enhanced screenshot and data collection."""
338
 
339
  with Display.display(test_config.test_name):
340
  result = TestResult(
 
361
  clickable_elements = dict(islice(clickable_elements.items(), 50))
362
  logging.warning(f'Clickable elements number is too large, only keep the first 50')
363
 
364
+ button_test = PageButtonTest(report_config=test_config.report_config)
365
  button_test_result = await button_test.run(
366
  target_url, page=page, clickable_elements=clickable_elements, browser_config=browser_config
367
  )
368
 
369
+ crawler = CrawlHandler(target_url)
370
+ links = await crawler.extract_links(page)
371
+ logging.info(f'Crawled {len(links)} links')
372
+ # WebAccessibilityTest
373
+ accessibility_test = WebAccessibilityTest(report_config=test_config.report_config)
374
+ accessibility_result = await accessibility_test.run(target_url, links)
 
 
 
 
 
 
 
 
 
 
 
375
 
 
 
376
 
377
+ # Combine test results into a list
378
+ result.sub_tests = [button_test_result, accessibility_result]
 
 
379
 
380
+ # Extract sub-test statuses
381
+ button_status = button_test_result.status if button_test_result else TestStatus.FAILED
382
+ accessibility_status = accessibility_result.status if accessibility_result else TestStatus.FAILED
 
 
 
 
 
383
 
384
+ # Determine overall status
385
+ if button_status == TestStatus.PASSED and accessibility_status == TestStatus.PASSED:
386
+ result.status = TestStatus.PASSED
387
+ else:
388
+ result.status = TestStatus.FAILED
389
 
390
+ # Collect errors from all tests
391
+ all_results = [button_test_result, accessibility_result]
392
+ errors = [r.messages.get('page') for r in all_results if r and r.messages and 'page' in r.messages]
393
 
394
+ if errors:
395
+ result.error_message = '; '.join(errors)
 
 
 
 
396
 
 
 
397
  logging.info(f"{icon['check']} Test completed: {test_config.test_name}")
398
 
399
  except Exception as e:
400
+ error_msg = f'Basic test failed: {str(e)}'
401
  result.status = TestStatus.FAILED
402
  result.error_message = error_msg
403
  logging.error(error_msg)
 
405
 
406
  return result
407
 
408
+ # class ButtonTestRunner(BaseTestRunner):
409
+ # """Runner dedicated to button click tests."""
410
+
411
+ # async def run_test(
412
+ # self, session: BrowserSession, test_config: TestConfiguration, llm_config: Dict[str, Any], target_url: str
413
+ # ) -> TestResult:
414
+ # """Run Button test."""
415
+
416
+ # with Display.display(test_config.test_name):
417
+ # result = TestResult(
418
+ # test_id=test_config.test_id,
419
+ # test_type=test_config.test_type,
420
+ # test_name=test_config.test_name,
421
+ # status=TestStatus.RUNNING,
422
+ # category=get_category_for_test_type(test_config.test_type),
423
+ # )
424
+
425
+ # try:
426
+ # logging.info(f"{icon['running']} Running test: {test_config.test_name}")
427
+ # page = session.get_page()
428
+ # browser_config = session.browser_config
429
+
430
+ # # Discover clickable elements via crawler
431
+ # from webqa_agent.crawler.crawl import CrawlHandler
432
+
433
+ # crawler = CrawlHandler(target_url)
434
+ # clickable_elements = await crawler.clickable_elements_detection(page)
435
+ # logging.info(f'Crawled {len(clickable_elements)} clickable elements')
436
+ # if len(clickable_elements) > 50:
437
+ # from itertools import islice
438
+ # clickable_elements = dict(islice(clickable_elements.items(), 50))
439
+ # logging.warning(f'Clickable elements number is too large, only keep the first 50')
440
+
441
+ # button_test = PageButtonTest()
442
+ # button_test_result = await button_test.run(
443
+ # target_url, page=page, clickable_elements=clickable_elements, browser_config=browser_config
444
+ # )
445
+
446
+ # # Second subtest: each clickable result? keep detailed reports if needed; here we only include traverse test
447
+ # result.sub_tests = [button_test_result]
448
+
449
+ # # Overall metrics/status
450
+ # result.status = button_test_result.status
451
+
452
+ # logging.info(f"{icon['check']} Test completed: {test_config.test_name}")
453
+
454
+ # except Exception as e:
455
+ # error_msg = f'Button test failed: {str(e)}'
456
+ # result.status = TestStatus.FAILED
457
+ # result.error_message = error_msg
458
+ # logging.error(error_msg)
459
+ # raise
460
+
461
+ # return result
462
+
463
+
464
+ # class WebBasicCheckRunner(BaseTestRunner):
465
+ # """Runner for Web Basic Check tests."""
466
+
467
+ # async def run_test(
468
+ # self, session: BrowserSession, test_config: TestConfiguration, llm_config: Dict[str, Any], target_url: str
469
+ # ) -> TestResult:
470
+ # """Run Web Basic Check tests."""
471
+
472
+ # with Display.display(test_config.test_name):
473
+ # result = TestResult(
474
+ # test_id=test_config.test_id,
475
+ # test_type=test_config.test_type,
476
+ # test_name=test_config.test_name,
477
+ # status=TestStatus.RUNNING,
478
+ # category=get_category_for_test_type(test_config.test_type),
479
+ # )
480
+
481
+ # try:
482
+ # logging.info(f"{icon['running']} Running test: {test_config.test_name}")
483
+ # page = session.get_page()
484
+
485
+ # # Discover page elements
486
+ # from webqa_agent.crawler.crawl import CrawlHandler
487
+
488
+ # crawler = CrawlHandler(target_url)
489
+ # links = await crawler.extract_links(page)
490
+ # logging.info(f'Crawled {len(links)} links')
491
+ # # WebAccessibilityTest
492
+ # accessibility_test = WebAccessibilityTest(self.llm_config, report_config=self.report_config)
493
+ # accessibility_result = await accessibility_test.run(target_url, links)
494
+
495
+ # result.sub_tests = [accessibility_result]
496
+ # result.status = accessibility_result.status
497
+ # logging.info(f"{icon['check']} Test completed: {test_config.test_name}")
498
+
499
+ # except Exception as e:
500
+ # error_msg = f'Web Basic Check test failed: {str(e)}'
501
+ # result.status = TestStatus.FAILED
502
+ # result.error_message = error_msg
503
+ # logging.error(error_msg)
504
+ # raise
505
+
506
+ # return result
507
 
508
  class SecurityTestRunner(BaseTestRunner):
509
  """Runner for Security tests using Nuclei-based scanning."""
510
 
511
+ def __init__(self):
512
+ super().__init__()
513
+ self.language = 'zh-CN' # Default language
514
+ self.localized_strings = {
515
+ 'zh-CN': i18n.get_lang_data('zh-CN').get('testers', {}).get('security', {}),
516
+ 'en-US': i18n.get_lang_data('en-US').get('testers', {}).get('security', {}),
517
+ }
518
+
519
+ def _get_text(self, key: str) -> str:
520
+ """Get localized text for the current language."""
521
+ return self.localized_strings.get(self.language, {}).get(key, key)
522
+
523
+ def get_scan_tags(self, language: str) -> Dict[str, str]:
524
+ """Get scan tags with localized descriptions."""
525
+ return {
526
+ 'cve': self._get_text('cve_scan'),
527
+ 'xss': self._get_text('xss_scan'),
528
+ 'sqli': self._get_text('sqli_scan'),
529
+ 'rce': self._get_text('rce_scan'),
530
+ 'lfi': self._get_text('lfi_scan'),
531
+ 'ssrf': self._get_text('ssrf_scan'),
532
+ 'redirect': self._get_text('redirect_scan'),
533
+ 'exposure': self._get_text('exposure_scan'),
534
+ 'config': self._get_text('config_scan'),
535
+ 'default-login': self._get_text('default_login_scan'),
536
+ 'ssl': self._get_text('ssl_scan'),
537
+ 'dns': self._get_text('dns_scan'),
538
+ 'subdomain-takeover': self._get_text('subdomain_takeover_scan'),
539
+ 'tech': self._get_text('tech_scan'),
540
+ 'panel': self._get_text('panel_scan'),
541
+ }
542
+
543
+ def get_protocol_scans(self, language: str) -> Dict[str, str]:
544
+ """Get protocol scans with localized descriptions."""
545
+ return {
546
+ 'http': self._get_text('http_protocol'),
547
+ 'dns': self._get_text('dns_protocol'),
548
+ 'tcp': self._get_text('tcp_protocol'),
549
+ 'ssl': self._get_text('ssl_protocol'),
550
+ }
551
 
552
  async def run_test(
553
  self, session: BrowserSession, test_config: TestConfiguration, llm_config: Dict[str, Any], target_url: str
554
  ) -> TestResult:
555
  """Run Security tests using Nuclei scanning."""
556
 
557
+ self.language = test_config.report_config.get('language', 'zh-CN')
558
  with Display.display(test_config.test_name):
559
  result = TestResult(
560
  test_id=test_config.test_id,
 
573
 
574
  if not nuclei_available:
575
  result.status = TestStatus.FAILED
576
+ result.error_message = self._get_text('nuclei_not_found')
577
  return result
578
 
579
  # Run the security scan
 
612
 
613
  # Build the report content
614
  if count == 0:
615
+ issues_text = self._get_text('no_severity_issues').format(severity=severity.upper())
616
  else:
617
  # ๅ–ๅ‰3ไธช้—ฎ้ข˜็š„ๅ็งฐไฝœไธบ็คบไพ‹
618
  sample_issues = [f['name'] for f in severity_findings[:3]]
619
+ issues_text = self._get_text('found_severity_issues').format(count=count, severity=severity.upper())
620
  if sample_issues:
621
+ issues_text += f": {', '.join(sample_issues)}"
622
  if count > 3:
623
+ issues_text += f" {self._get_text('and_more')}"
624
 
625
  sub_tests.append(
626
  SubTestResult(
627
+ name=self._get_text('severity_level_scan').format(severity=severity.upper()),
628
  status=TestStatus.PASSED,
629
  metrics={'findings_count': count},
630
+ report=[SubTestReport(
631
+ title=self._get_text('severity_level_vulnerability').format(severity=severity.upper()),
632
+ issues=issues_text
633
+ )],
634
  )
635
  )
636
 
637
  # Create sub-tests per scan type
638
+ scan_tags = self.get_scan_tags(self.language)
639
+ protocol_scans = self.get_protocol_scans(self.language)
640
+ for scan_type, description in {**scan_tags, **protocol_scans}.items():
641
  type_findings = [f for f in finding_details if scan_type in f.get('template_id', '').lower()]
642
  type_count = len(type_findings)
643
 
644
  # ๆž„ๅปบๆ‰ซๆ็ฑปๅž‹ๆŠฅๅ‘Šๅ†…ๅฎน
645
  if type_count == 0:
646
+ issues_text = f"{description}: {self._get_text('no_security_issues')}"
647
  else:
648
  # ๆŒ‰ไธฅ้‡็จ‹ๅบฆ็ปŸ่ฎก่ฏฅ็ฑปๅž‹็š„ๅ‘็Žฐ
649
  type_severity_counts = {}
 
654
  severity_summary = []
655
  for sev in ['critical', 'high', 'medium', 'low', 'info']:
656
  if type_severity_counts.get(sev, 0) > 0:
657
+ severity_summary.append(f"{sev.upper()} {i18n.t(self.language, 'common.level', 'level')} {type_severity_counts[sev]} {i18n.t(self.language, 'common.issues', 'issues')}")
658
 
659
+ issues_text = f"{description}: {self._get_text('found_issues').format(count=type_count)}"
660
  if severity_summary:
661
+ issues_text += f" ({', '.join(severity_summary)})"
662
 
663
  # Add concrete issue examples (up to 3)
664
  if type_findings:
665
  sample_names = [f['name'] for f in type_findings[:2]]
666
  if sample_names:
667
+ issues_text += f", {self._get_text('including')}: {', '.join(sample_names)}"
668
  if type_count > 2:
669
+ issues_text += f" {self._get_text('and_more')}"
670
 
671
  combined_reports = []
672
  if not finding_details:
673
  # No security issues found
674
+ combined_reports.append(SubTestReport(
675
+ title=self._get_text('security_check'),
676
+ issues=self._get_text('no_issues_found')
677
+ ))
678
  else:
679
  for fd in finding_details:
680
  title = f"[{fd.get('severity', 'unknown').upper()}] {fd.get('name')}"
 
682
  if fd.get('description'):
683
  details_parts.append(fd['description'])
684
  if fd.get('matched_at'):
685
+ details_parts.append(f"{self._get_text('matched_at')}: {fd['matched_at']}")
686
  if fd.get('extracted_results'):
687
+ details_parts.append(f"{self._get_text('extracted')}: {', '.join(map(str, fd['extracted_results']))}")
688
+ issues_text = ' | '.join(details_parts) if details_parts else self._get_text('no_details')
689
  combined_reports.append(SubTestReport(title=title, issues=issues_text))
690
 
691
  sub_tests = [
692
  SubTestResult(
693
+ name=self._get_text('nuclei_check'),
694
  status=TestStatus.PASSED,
695
  metrics={
696
  'total_findings': len(finding_details),
 
761
  temp_dir.mkdir(parents=True, exist_ok=True)
762
 
763
  # Configure the scan tasks
764
+ scan_configs = {'tag': self.get_scan_tags(self.language), 'protocol': self.get_protocol_scans(self.language)}
765
 
766
  # ไปŽๆต‹่ฏ•้…็ฝฎไธญ่Žทๅ–่‡ชๅฎšไน‰ๅ‚ๆ•ฐ
767
  custom_config = test_config.test_specific_config or {}
768
  include_severity_scans = custom_config.get('include_severity_scans', True)
769
 
770
  if include_severity_scans:
771
+ scan_configs['severity'] = {
772
+ 'critical': self._get_text('critical_vulnerability'),
773
+ 'high': self._get_text('high_risk_vulnerability'),
774
+ 'medium': self._get_text('medium_risk_vulnerability')
775
+ }
776
 
777
  # Run the scans in parallel
778
  scan_results = await self._execute_scan_batch(target_url, scan_configs, temp_dir)
 
885
  shutil.rmtree(temp_dir)
886
  logging.debug(f'Cleaned up temporary security scan files: {temp_path}')
887
  except Exception as e:
888
+ logging.warning(f'Failed to cleanup temporary files at {temp_path}: {e}')
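The security runner now assembles its report text from localized format strings via `_get_text(...).format(...)`. The i18n key names are defined outside this diff, so the dictionary below is an assumed subset used only to illustrate the pattern; the zh-CN templates correspond to the literals removed above, while the en-US wording is a guess:

```python
# Hypothetical subset of the 'testers.security' i18n entries (not taken from this diff).
SECURITY_STRINGS = {
    "zh-CN": {
        "no_severity_issues": "ๆœชๅ‘็Žฐ{severity}็บงๅˆซๅฎ‰ๅ…จ้—ฎ้ข˜",
        "found_severity_issues": "ๅ‘็Žฐ{count}ไธช{severity}็บงๅˆซๅฎ‰ๅ…จ้—ฎ้ข˜",
    },
    "en-US": {
        "no_severity_issues": "No {severity} severity issues found",
        "found_severity_issues": "Found {count} {severity} severity issues",
    },
}


def severity_line(language: str, severity: str, count: int) -> str:
    # Fall back to zh-CN when the requested language has no entry, mirroring the runner's default.
    strings = SECURITY_STRINGS.get(language, SECURITY_STRINGS["zh-CN"])
    if count == 0:
        return strings["no_severity_issues"].format(severity=severity.upper())
    return strings["found_severity_issues"].format(count=count, severity=severity.upper())


print(severity_line("en-US", "high", 0))   # No HIGH severity issues found
print(severity_line("zh-CN", "high", 3))   # ๅ‘็Žฐ3ไธชHIGH็บงๅˆซๅฎ‰ๅ…จ้—ฎ้ข˜
```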
webqa_agent/llm/prompt.py CHANGED
@@ -610,7 +610,7 @@ class LLMPrompt:
610
  # You are a web content quality inspector. You need to carefully read the text content of the webpage and complete the task based on the user's test objective. Please ensure that the output JSON format does not contain any code blocks or backticks.
611
 
612
  TEXT_USER_CASES = [
613
- """ๅ†…ๅฎน็บ ้”™: Carefully inspect the text on the current page and identify any spelling, grammar, or character errors.
614
  Text Accuracy: Spelling errors, grammatical errors, punctuation errors; inconsistent formatting of numbers, units, and currency.
615
  Wording & Tone: Consistent wording; consistent terminology and abbreviations; consistent tone of voice with the product.
616
  Language Consistency: Inappropriate mixing of languages โ€‹โ€‹within the page (e.g., mixing Chinese and English without spacing).
@@ -626,7 +626,7 @@ class LLMPrompt:
626
  """
627
  ]
628
  CONTENT_USER_CASES = [
629
- """ๅธƒๅฑ€ๆฃ€ๆŸฅ: Rigorously review each screenshot at the current viewport for layout issues, and provide specific, actionable recommendations.
630
 
631
  [Checklist]
632
  1. Text alignment: Misaligned headings/paragraphs/lists; inconsistent margins or baselines
@@ -644,7 +644,7 @@ class LLMPrompt:
644
  - If multiple layout issues exist in the same screenshot, merge them into a single object and list them in the 'issue' field separated by semicolons
645
  - If no issues are found, output strictly None (no explanation)
646
  """,
647
- """ๅ…ƒ็ด ๆฃ€ๆŸฅ: Rigorously check each screenshot for missing key functional/content/navigation elements, loading failures, or display anomalies, and provide fix suggestions.
648
 
649
  [Checklist]
650
  1. Functional elements: Buttons/links/inputs/dropdowns/pagination/search etc. missing or misplaced
 
610
  # You are a web content quality inspector. You need to carefully read the text content of the webpage and complete the task based on the user's test objective. Please ensure that the output JSON format does not contain any code blocks or backticks.
611
 
612
  TEXT_USER_CASES = [
613
+ """Carefully inspect the text on the current page and identify any spelling, grammar, or character errors.
614
  Text Accuracy: Spelling errors, grammatical errors, punctuation errors; inconsistent formatting of numbers, units, and currency.
615
  Wording & Tone: Consistent wording; consistent terminology and abbreviations; consistent tone of voice with the product.
616
  Language Consistency: Inappropriate mixing of languages โ€‹โ€‹within the page (e.g., mixing Chinese and English without spacing).
 
626
  """
627
  ]
628
  CONTENT_USER_CASES = [
629
+ """Rigorously review each screenshot at the current viewport for layout issues, and provide specific, actionable recommendations.
630
 
631
  [Checklist]
632
  1. Text alignment: Misaligned headings/paragraphs/lists; inconsistent margins or baselines
 
644
  - If multiple layout issues exist in the same screenshot, merge them into a single object and list them in the 'issue' field separated by semicolons
645
  - If no issues are found, output strictly None (no explanation)
646
  """,
647
+ """Rigorously check each screenshot for missing key functional/content/navigation elements, loading failures, or display anomalies, and provide fix suggestions.
648
 
649
  [Checklist]
650
  1. Functional elements: Buttons/links/inputs/dropdowns/pagination/search etc. missing or misplaced
webqa_agent/static/assets/index.js CHANGED
@@ -217,4 +217,4 @@ html body {
217
  `))+1))}const a="#".repeat(i),l=n.enter("headingAtx"),s=n.enter("phrasing");o.move(a+" ");let u=n.containerPhrasing(e,{before:"# ",after:`
218
  `,...o.current()});return/^[\t ]/.test(u)&&(u=es(u.charCodeAt(0))+u.slice(1)),u=u?a+" "+u:a,n.options.closeAtx&&(u+=" "+a),s(),l(),u}RC.peek=JD;function RC(e){return e.value||""}function JD(){return"<"}MC.peek=e3;function MC(e,t,n,r){const i=dg(n),o=i==='"'?"Quote":"Apostrophe",a=n.enter("image");let l=n.enter("label");const s=n.createTracker(r);let u=s.move("![");return u+=s.move(n.safe(e.alt,{before:u,after:"]",...s.current()})),u+=s.move("]("),l(),!e.url&&e.title||/[\0- \u007F]/.test(e.url)?(l=n.enter("destinationLiteral"),u+=s.move("<"),u+=s.move(n.safe(e.url,{before:u,after:">",...s.current()})),u+=s.move(">")):(l=n.enter("destinationRaw"),u+=s.move(n.safe(e.url,{before:u,after:e.title?" ":")",...s.current()}))),l(),e.title&&(l=n.enter(`title${o}`),u+=s.move(" "+i),u+=s.move(n.safe(e.title,{before:u,after:i,...s.current()})),u+=s.move(i),l()),u+=s.move(")"),a(),u}function e3(){return"!"}OC.peek=t3;function OC(e,t,n,r){const i=e.referenceType,o=n.enter("imageReference");let a=n.enter("label");const l=n.createTracker(r);let s=l.move("![");const u=n.safe(e.alt,{before:s,after:"]",...l.current()});s+=l.move(u+"]["),a();const c=n.stack;n.stack=[],a=n.enter("reference");const f=n.safe(n.associationId(e),{before:s,after:"]",...l.current()});return a(),n.stack=c,o(),i==="full"||!u||u!==f?s+=l.move(f+"]"):i==="shortcut"?s=s.slice(0,-1):s+=l.move("]"),s}function t3(){return"!"}NC.peek=n3;function NC(e,t,n){let r=e.value||"",i="`",o=-1;for(;new RegExp("(^|[^`])"+i+"([^`]|$)").test(r);)i+="`";for(/[^ \r\n]/.test(r)&&(/^[ \r\n]/.test(r)&&/[ \r\n]$/.test(r)||/^`|`$/.test(r))&&(r=" "+r+" ");++o<n.unsafe.length;){const a=n.unsafe[o],l=n.compilePattern(a);let s;if(a.atBreak)for(;s=l.exec(r);){let u=s.index;r.charCodeAt(u)===10&&r.charCodeAt(u-1)===13&&u--,r=r.slice(0,u)+" "+r.slice(s.index+1)}}return i+r+i}function n3(){return"`"}function AC(e,t){const n=rg(e);return!!(!t.options.resourceLink&&e.url&&!e.title&&e.children&&e.children.length===1&&e.children[0].type==="text"&&(n===e.url||"mailto:"+n===e.url)&&/^[a-z][a-z+.-]+:/i.test(e.url)&&!/[\0- <>\u007F]/.test(e.url))}FC.peek=r3;function FC(e,t,n,r){const i=dg(n),o=i==='"'?"Quote":"Apostrophe",a=n.createTracker(r);let l,s;if(AC(e,n)){const c=n.stack;n.stack=[],l=n.enter("autolink");let f=a.move("<");return f+=a.move(n.containerPhrasing(e,{before:f,after:">",...a.current()})),f+=a.move(">"),l(),n.stack=c,f}l=n.enter("link"),s=n.enter("label");let u=a.move("[");return u+=a.move(n.containerPhrasing(e,{before:u,after:"](",...a.current()})),u+=a.move("]("),s(),!e.url&&e.title||/[\0- \u007F]/.test(e.url)?(s=n.enter("destinationLiteral"),u+=a.move("<"),u+=a.move(n.safe(e.url,{before:u,after:">",...a.current()})),u+=a.move(">")):(s=n.enter("destinationRaw"),u+=a.move(n.safe(e.url,{before:u,after:e.title?" 
":")",...a.current()}))),s(),e.title&&(s=n.enter(`title${o}`),u+=a.move(" "+i),u+=a.move(n.safe(e.title,{before:u,after:i,...a.current()})),u+=a.move(i),s()),u+=a.move(")"),l(),u}function r3(e,t,n){return AC(e,n)?"<":"["}LC.peek=i3;function LC(e,t,n,r){const i=e.referenceType,o=n.enter("linkReference");let a=n.enter("label");const l=n.createTracker(r);let s=l.move("[");const u=n.containerPhrasing(e,{before:s,after:"]",...l.current()});s+=l.move(u+"]["),a();const c=n.stack;n.stack=[],a=n.enter("reference");const f=n.safe(n.associationId(e),{before:s,after:"]",...l.current()});return a(),n.stack=c,o(),i==="full"||!u||u!==f?s+=l.move(f+"]"):i==="shortcut"?s=s.slice(0,-1):s+=l.move("]"),s}function i3(){return"["}function hg(e){const t=e.options.bullet||"*";if(t!=="*"&&t!=="+"&&t!=="-")throw new Error("Cannot serialize items with `"+t+"` for `options.bullet`, expected `*`, `+`, or `-`");return t}function o3(e){const t=hg(e),n=e.options.bulletOther;if(!n)return t==="*"?"-":"*";if(n!=="*"&&n!=="+"&&n!=="-")throw new Error("Cannot serialize items with `"+n+"` for `options.bulletOther`, expected `*`, `+`, or `-`");if(n===t)throw new Error("Expected `bullet` (`"+t+"`) and `bulletOther` (`"+n+"`) to be different");return n}function a3(e){const t=e.options.bulletOrdered||".";if(t!=="."&&t!==")")throw new Error("Cannot serialize items with `"+t+"` for `options.bulletOrdered`, expected `.` or `)`");return t}function zC(e){const t=e.options.rule||"*";if(t!=="*"&&t!=="-"&&t!=="_")throw new Error("Cannot serialize rules with `"+t+"` for `options.rule`, expected `*`, `-`, or `_`");return t}function l3(e,t,n,r){const i=n.enter("list"),o=n.bulletCurrent;let a=e.ordered?a3(n):hg(n);const l=e.ordered?a==="."?")":".":o3(n);let s=t&&n.bulletLastUsed?a===n.bulletLastUsed:!1;if(!e.ordered){const c=e.children?e.children[0]:void 0;if((a==="*"||a==="-")&&c&&(!c.children||!c.children[0])&&n.stack[n.stack.length-1]==="list"&&n.stack[n.stack.length-2]==="listItem"&&n.stack[n.stack.length-3]==="list"&&n.stack[n.stack.length-4]==="listItem"&&n.indexStack[n.indexStack.length-1]===0&&n.indexStack[n.indexStack.length-2]===0&&n.indexStack[n.indexStack.length-3]===0&&(s=!0),zC(n)===a&&c){let f=-1;for(;++f<e.children.length;){const d=e.children[f];if(d&&d.type==="listItem"&&d.children&&d.children[0]&&d.children[0].type==="thematicBreak"){s=!0;break}}}}s&&(a=l),n.bulletCurrent=a;const u=n.containerFlow(e,r);return n.bulletLastUsed=a,n.bulletCurrent=o,i(),u}function s3(e){const t=e.options.listItemIndent||"one";if(t!=="tab"&&t!=="one"&&t!=="mixed")throw new Error("Cannot serialize items with `"+t+"` for `options.listItemIndent`, expected `tab`, `one`, or `mixed`");return t}function u3(e,t,n,r){const i=s3(n);let o=n.bulletCurrent||hg(n);t&&t.type==="list"&&t.ordered&&(o=(typeof t.start=="number"&&t.start>-1?t.start:1)+(n.options.incrementListMarker===!1?0:t.children.indexOf(e))+o);let a=o.length+1;(i==="tab"||i==="mixed"&&(t&&t.type==="list"&&t.spread||e.spread))&&(a=Math.ceil(a/4)*4);const l=n.createTracker(r);l.move(o+" ".repeat(a-o.length)),l.shift(a);const s=n.enter("listItem"),u=n.indentLines(n.containerFlow(e,l.current()),c);return s(),u;function c(f,d,h){return d?(h?"":" ".repeat(a))+f:(h?o:o+" ".repeat(a-o.length))+f}}function c3(e,t,n,r){const i=n.enter("paragraph"),o=n.enter("phrasing"),a=n.containerPhrasing(e,r);return o(),i(),a}const 
f3=mf(["break","delete","emphasis","footnote","footnoteReference","image","imageReference","inlineCode","inlineMath","link","linkReference","mdxJsxTextElement","mdxTextExpression","strong","text","textDirective"]);function d3(e,t,n,r){return(e.children.some(function(a){return f3(a)})?n.containerPhrasing:n.containerFlow).call(n,e,r)}function h3(e){const t=e.options.strong||"*";if(t!=="*"&&t!=="_")throw new Error("Cannot serialize strong with `"+t+"` for `options.strong`, expected `*`, or `_`");return t}DC.peek=m3;function DC(e,t,n,r){const i=h3(n),o=n.enter("strong"),a=n.createTracker(r),l=a.move(i+i);let s=a.move(n.containerPhrasing(e,{after:i,before:l,...a.current()}));const u=s.charCodeAt(0),c=gc(r.before.charCodeAt(r.before.length-1),u,i);c.inside&&(s=es(u)+s.slice(1));const f=s.charCodeAt(s.length-1),d=gc(r.after.charCodeAt(0),f,i);d.inside&&(s=s.slice(0,-1)+es(f));const h=a.move(i+i);return o(),n.attentionEncodeSurroundingInfo={after:d.outside,before:c.outside},l+s+h}function m3(e,t,n){return n.options.strong||"*"}function p3(e,t,n,r){return n.safe(e.value,r)}function g3(e){const t=e.options.ruleRepetition||3;if(t<3)throw new Error("Cannot serialize rules with repetition `"+t+"` for `options.ruleRepetition`, expected `3` or more");return t}function v3(e,t,n){const r=(zC(n)+(n.options.ruleSpaces?" ":"")).repeat(g3(n));return n.options.ruleSpaces?r.slice(0,-1):r}const jC={blockquote:BD,break:a1,code:KD,definition:GD,emphasis:$C,hardBreak:a1,heading:ZD,html:RC,image:MC,imageReference:OC,inlineCode:NC,link:FC,linkReference:LC,list:l3,listItem:u3,paragraph:c3,root:d3,strong:DC,text:p3,thematicBreak:v3};function y3(){return{enter:{table:b3,tableData:l1,tableHeader:l1,tableRow:x3},exit:{codeText:S3,table:w3,tableData:Sd,tableHeader:Sd,tableRow:Sd}}}function b3(e){const t=e._align;this.enter({type:"table",align:t.map(function(n){return n==="none"?null:n}),children:[]},e),this.data.inTable=!0}function w3(e){this.exit(e),this.data.inTable=void 0}function x3(e){this.enter({type:"tableRow",children:[]},e)}function Sd(e){this.exit(e)}function l1(e){this.enter({type:"tableCell",children:[]},e)}function S3(e){let t=this.resume();this.data.inTable&&(t=t.replace(/\\([\\|])/g,C3));const n=this.stack[this.stack.length-1];n.type,n.value=t,this.exit(e)}function C3(e,t){return t==="|"?t:e}function k3(e){const t=e||{},n=t.tableCellPadding,r=t.tablePipeAlign,i=t.stringLength,o=n?" ":"|";return{unsafe:[{character:"\r",inConstruct:"tableCell"},{character:`
  `,inConstruct:"tableCell"},{atBreak:!0,character:"|",after:"[ :-]"},{character:"|",inConstruct:"tableCell"},{atBreak:!0,character:":",after:"-"},{atBreak:!0,character:"-",after:"[:|-]"}],handlers:{inlineCode:d,table:a,tableCell:s,tableRow:l}};function a(h,g,y,w){return u(c(h,y,w),h.align)}function l(h,g,y,w){const v=f(h,y,w),p=u([v]);return p.slice(0,p.indexOf(`
- `))}function s(h,g,y,w){const v=y.enter("tableCell"),p=y.enter("phrasing"),b=y.containerPhrasing(h,{...w,before:o,after:o});return p(),v(),b}function u(h,g){return DD(h,{align:g,alignDelimiters:r,padding:n,stringLength:i})}function c(h,g,y){const w=h.children;let v=-1;const p=[],b=g.enter("table");for(;++v<w.length;)p[v]=f(w[v],g,y);return b(),p}function f(h,g,y){const w=h.children;let v=-1;const p=[],b=g.enter("tableRow");for(;++v<w.length;)p[v]=s(w[v],h,g,y);return b(),p}function d(h,g,y){let w=jC.inlineCode(h,g,y);return y.stack.includes("tableCell")&&(w=w.replace(/\|/g,"\\$&")),w}}function E3(){return{exit:{taskListCheckValueChecked:s1,taskListCheckValueUnchecked:s1,paragraph:P3}}}function _3(){return{unsafe:[{atBreak:!0,character:"-",after:"[:|-]"}],handlers:{listItem:I3}}}function s1(e){const t=this.stack[this.stack.length-2];t.type,t.checked=e.type==="taskListCheckValueChecked"}function P3(e){const t=this.stack[this.stack.length-2];if(t&&t.type==="listItem"&&typeof t.checked=="boolean"){const n=this.stack[this.stack.length-1];n.type;const r=n.children[0];if(r&&r.type==="text"){const i=t.children;let o=-1,a;for(;++o<i.length;){const l=i[o];if(l.type==="paragraph"){a=l;break}}a===n&&(r.value=r.value.slice(1),r.value.length===0?n.children.shift():n.position&&r.position&&typeof r.position.start.offset=="number"&&(r.position.start.column++,r.position.start.offset++,n.position.start=Object.assign({},r.position.start)))}}this.exit(e)}function I3(e,t,n,r){const i=e.children[0],o=typeof e.checked=="boolean"&&i&&i.type==="paragraph",a="["+(e.checked?"x":" ")+"] ",l=n.createTracker(r);o&&l.move(a);let s=jC.listItem(e,t,n,{...r,...l.current()});return o&&(s=s.replace(/^(?:[*+-]|\d+\.)([\r\n]| {1,3})/,u)),s;function u(c){return c+a}}function T3(){return[sD(),TD(),OD(),y3(),E3()]}function $3(e){return{extensions:[uD(),$D(e),ND(),k3(e),_3()]}}const R3={tokenize:L3,partial:!0},BC={tokenize:z3,partial:!0},HC={tokenize:D3,partial:!0},VC={tokenize:j3,partial:!0},M3={tokenize:B3,partial:!0},WC={name:"wwwAutolink",tokenize:A3,previous:KC},UC={name:"protocolAutolink",tokenize:F3,previous:qC},di={name:"emailAutolink",tokenize:N3,previous:GC},Yr={};function O3(){return{text:Yr}}let to=48;for(;to<123;)Yr[to]=di,to++,to===58?to=65:to===91&&(to=97);Yr[43]=di;Yr[45]=di;Yr[46]=di;Yr[95]=di;Yr[72]=[di,UC];Yr[104]=[di,UC];Yr[87]=[di,WC];Yr[119]=[di,WC];function N3(e,t,n){const r=this;let i,o;return a;function a(f){return!km(f)||!GC.call(r,r.previous)||mg(r.events)?n(f):(e.enter("literalAutolink"),e.enter("literalAutolinkEmail"),l(f))}function l(f){return km(f)?(e.consume(f),l):f===64?(e.consume(f),s):n(f)}function s(f){return f===46?e.check(M3,c,u)(f):f===45||f===95||vn(f)?(o=!0,e.consume(f),s):c(f)}function u(f){return e.consume(f),i=!0,s}function c(f){return o&&i&&Cn(r.previous)?(e.exit("literalAutolinkEmail"),e.exit("literalAutolink"),t(f)):n(f)}}function A3(e,t,n){const r=this;return i;function i(a){return a!==87&&a!==119||!KC.call(r,r.previous)||mg(r.events)?n(a):(e.enter("literalAutolink"),e.enter("literalAutolinkWww"),e.check(R3,e.attempt(BC,e.attempt(HC,o),n),n)(a))}function o(a){return e.exit("literalAutolinkWww"),e.exit("literalAutolink"),t(a)}}function F3(e,t,n){const r=this;let i="",o=!1;return a;function a(f){return(f===72||f===104)&&qC.call(r,r.previous)&&!mg(r.events)?(e.enter("literalAutolink"),e.enter("literalAutolinkHttp"),i+=String.fromCodePoint(f),e.consume(f),l):n(f)}function l(f){if(Cn(f)&&i.length<5)return i+=String.fromCodePoint(f),e.consume(f),l;if(f===58){const 
d=i.toLowerCase();if(d==="http"||d==="https")return e.consume(f),s}return n(f)}function s(f){return f===47?(e.consume(f),o?u:(o=!0,s)):n(f)}function u(f){return f===null||hc(f)||dt(f)||xo(f)||ff(f)?n(f):e.attempt(BC,e.attempt(HC,c),n)(f)}function c(f){return e.exit("literalAutolinkHttp"),e.exit("literalAutolink"),t(f)}}function L3(e,t,n){let r=0;return i;function i(a){return(a===87||a===119)&&r<3?(r++,e.consume(a),i):a===46&&r===3?(e.consume(a),o):n(a)}function o(a){return a===null?n(a):t(a)}}function z3(e,t,n){let r,i,o;return a;function a(u){return u===46||u===95?e.check(VC,s,l)(u):u===null||dt(u)||xo(u)||u!==45&&ff(u)?s(u):(o=!0,e.consume(u),a)}function l(u){return u===95?r=!0:(i=r,r=void 0),e.consume(u),a}function s(u){return i||r||!o?n(u):t(u)}}function D3(e,t){let n=0,r=0;return i;function i(a){return a===40?(n++,e.consume(a),i):a===41&&r<n?o(a):a===33||a===34||a===38||a===39||a===41||a===42||a===44||a===46||a===58||a===59||a===60||a===63||a===93||a===95||a===126?e.check(VC,t,o)(a):a===null||dt(a)||xo(a)?t(a):(e.consume(a),i)}function o(a){return a===41&&r++,e.consume(a),i}}function j3(e,t,n){return r;function r(l){return l===33||l===34||l===39||l===41||l===42||l===44||l===46||l===58||l===59||l===63||l===95||l===126?(e.consume(l),r):l===38?(e.consume(l),o):l===93?(e.consume(l),i):l===60||l===null||dt(l)||xo(l)?t(l):n(l)}function i(l){return l===null||l===40||l===91||dt(l)||xo(l)?t(l):r(l)}function o(l){return Cn(l)?a(l):n(l)}function a(l){return l===59?(e.consume(l),r):Cn(l)?(e.consume(l),a):n(l)}}function B3(e,t,n){return r;function r(o){return e.consume(o),i}function i(o){return vn(o)?n(o):t(o)}}function KC(e){return e===null||e===40||e===42||e===95||e===91||e===93||e===126||dt(e)}function qC(e){return!Cn(e)}function GC(e){return!(e===47||km(e))}function km(e){return e===43||e===45||e===46||e===95||vn(e)}function mg(e){let t=e.length,n=!1;for(;t--;){const r=e[t][1];if((r.type==="labelLink"||r.type==="labelImage")&&!r._balanced){n=!0;break}if(r._gfmAutolinkLiteralWalkedInto){n=!1;break}}return e.length>0&&!n&&(e[e.length-1][1]._gfmAutolinkLiteralWalkedInto=!0),n}const H3={tokenize:Q3,partial:!0};function V3(){return{document:{91:{name:"gfmFootnoteDefinition",tokenize:q3,continuation:{tokenize:G3},exit:X3}},text:{91:{name:"gfmFootnoteCall",tokenize:K3},93:{name:"gfmPotentialFootnoteCall",add:"after",tokenize:W3,resolveTo:U3}}}}function W3(e,t,n){const r=this;let i=r.events.length;const o=r.parser.gfmFootnotes||(r.parser.gfmFootnotes=[]);let a;for(;i--;){const s=r.events[i][1];if(s.type==="labelImage"){a=s;break}if(s.type==="gfmFootnoteCall"||s.type==="labelLink"||s.type==="label"||s.type==="image"||s.type==="link")break}return l;function l(s){if(!a||!a._balanced)return n(s);const u=Ar(r.sliceSerialize({start:a.end,end:r.now()}));return u.codePointAt(0)!==94||!o.includes(u.slice(1))?n(s):(e.enter("gfmFootnoteCallLabelMarker"),e.consume(s),e.exit("gfmFootnoteCallLabelMarker"),t(s))}}function U3(e,t){let n=e.length;for(;n--;)if(e[n][1].type==="labelImage"&&e[n][0]==="enter"){e[n][1];break}e[n+1][1].type="data",e[n+3][1].type="gfmFootnoteCallLabelMarker";const r={type:"gfmFootnoteCall",start:Object.assign({},e[n+3][1].start),end:Object.assign({},e[e.length-1][1].end)},i={type:"gfmFootnoteCallMarker",start:Object.assign({},e[n+3][1].end),end:Object.assign({},e[n+3][1].end)};i.end.column++,i.end.offset++,i.end._bufferIndex++;const 
o={type:"gfmFootnoteCallString",start:Object.assign({},i.end),end:Object.assign({},e[e.length-1][1].start)},a={type:"chunkString",contentType:"string",start:Object.assign({},o.start),end:Object.assign({},o.end)},l=[e[n+1],e[n+2],["enter",r,t],e[n+3],e[n+4],["enter",i,t],["exit",i,t],["enter",o,t],["enter",a,t],["exit",a,t],["exit",o,t],e[e.length-2],e[e.length-1],["exit",r,t]];return e.splice(n,e.length-n+1,...l),e}function K3(e,t,n){const r=this,i=r.parser.gfmFootnotes||(r.parser.gfmFootnotes=[]);let o=0,a;return l;function l(f){return e.enter("gfmFootnoteCall"),e.enter("gfmFootnoteCallLabelMarker"),e.consume(f),e.exit("gfmFootnoteCallLabelMarker"),s}function s(f){return f!==94?n(f):(e.enter("gfmFootnoteCallMarker"),e.consume(f),e.exit("gfmFootnoteCallMarker"),e.enter("gfmFootnoteCallString"),e.enter("chunkString").contentType="string",u)}function u(f){if(o>999||f===93&&!a||f===null||f===91||dt(f))return n(f);if(f===93){e.exit("chunkString");const d=e.exit("gfmFootnoteCallString");return i.includes(Ar(r.sliceSerialize(d)))?(e.enter("gfmFootnoteCallLabelMarker"),e.consume(f),e.exit("gfmFootnoteCallLabelMarker"),e.exit("gfmFootnoteCall"),t):n(f)}return dt(f)||(a=!0),o++,e.consume(f),f===92?c:u}function c(f){return f===91||f===92||f===93?(e.consume(f),o++,u):u(f)}}function q3(e,t,n){const r=this,i=r.parser.gfmFootnotes||(r.parser.gfmFootnotes=[]);let o,a=0,l;return s;function s(g){return e.enter("gfmFootnoteDefinition")._container=!0,e.enter("gfmFootnoteDefinitionLabel"),e.enter("gfmFootnoteDefinitionLabelMarker"),e.consume(g),e.exit("gfmFootnoteDefinitionLabelMarker"),u}function u(g){return g===94?(e.enter("gfmFootnoteDefinitionMarker"),e.consume(g),e.exit("gfmFootnoteDefinitionMarker"),e.enter("gfmFootnoteDefinitionLabelString"),e.enter("chunkString").contentType="string",c):n(g)}function c(g){if(a>999||g===93&&!l||g===null||g===91||dt(g))return n(g);if(g===93){e.exit("chunkString");const y=e.exit("gfmFootnoteDefinitionLabelString");return o=Ar(r.sliceSerialize(y)),e.enter("gfmFootnoteDefinitionLabelMarker"),e.consume(g),e.exit("gfmFootnoteDefinitionLabelMarker"),e.exit("gfmFootnoteDefinitionLabel"),d}return dt(g)||(l=!0),a++,e.consume(g),g===92?f:c}function f(g){return g===91||g===92||g===93?(e.consume(g),a++,c):c(g)}function d(g){return g===58?(e.enter("definitionMarker"),e.consume(g),e.exit("definitionMarker"),i.includes(o)||i.push(o),Le(e,h,"gfmFootnoteDefinitionWhitespace")):n(g)}function h(g){return t(g)}}function G3(e,t,n){return e.check(ws,t,e.attempt(H3,t,n))}function X3(e){e.exit("gfmFootnoteDefinition")}function Q3(e,t,n){const r=this;return Le(e,i,"gfmFootnoteDefinitionIndent",4+1);function i(o){const a=r.events[r.events.length-1];return a&&a[1].type==="gfmFootnoteDefinitionIndent"&&a[2].sliceSerialize(a[1],!0).length===4?t(o):n(o)}}function Y3(e){let n=(e||{}).singleTilde;const r={name:"strikethrough",tokenize:o,resolveAll:i};return n==null&&(n=!0),{text:{126:r},insideSpan:{null:[r]},attentionMarkers:{null:[126]}};function i(a,l){let s=-1;for(;++s<a.length;)if(a[s][0]==="enter"&&a[s][1].type==="strikethroughSequenceTemporary"&&a[s][1]._close){let u=s;for(;u--;)if(a[u][0]==="exit"&&a[u][1].type==="strikethroughSequenceTemporary"&&a[u][1]._open&&a[s][1].end.offset-a[s][1].start.offset===a[u][1].end.offset-a[u][1].start.offset){a[s][1].type="strikethroughSequence",a[u][1].type="strikethroughSequence";const 
c={type:"strikethrough",start:Object.assign({},a[u][1].start),end:Object.assign({},a[s][1].end)},f={type:"strikethroughText",start:Object.assign({},a[u][1].end),end:Object.assign({},a[s][1].start)},d=[["enter",c,l],["enter",a[u][1],l],["exit",a[u][1],l],["enter",f,l]],h=l.parser.constructs.insideSpan.null;h&&Qn(d,d.length,0,df(h,a.slice(u+1,s),l)),Qn(d,d.length,0,[["exit",f,l],["enter",a[s][1],l],["exit",a[s][1],l],["exit",c,l]]),Qn(a,u-1,s-u+3,d),s=u+d.length-2;break}}for(s=-1;++s<a.length;)a[s][1].type==="strikethroughSequenceTemporary"&&(a[s][1].type="data");return a}function o(a,l,s){const u=this.previous,c=this.events;let f=0;return d;function d(g){return u===126&&c[c.length-1][1].type!=="characterEscape"?s(g):(a.enter("strikethroughSequenceTemporary"),h(g))}function h(g){const y=ka(u);if(g===126)return f>1?s(g):(a.consume(g),f++,h);if(f<2&&!n)return s(g);const w=a.exit("strikethroughSequenceTemporary"),v=ka(g);return w._open=!v||v===2&&!!y,w._close=!y||y===2&&!!v,l(g)}}}class Z3{constructor(){this.map=[]}add(t,n,r){J3(this,t,n,r)}consume(t){if(this.map.sort(function(o,a){return o[0]-a[0]}),this.map.length===0)return;let n=this.map.length;const r=[];for(;n>0;)n-=1,r.push(t.slice(this.map[n][0]+this.map[n][1]),this.map[n][2]),t.length=this.map[n][0];r.push(t.slice()),t.length=0;let i=r.pop();for(;i;){for(const o of i)t.push(o);i=r.pop()}this.map.length=0}}function J3(e,t,n,r){let i=0;if(!(n===0&&r.length===0)){for(;i<e.map.length;){if(e.map[i][0]===t){e.map[i][1]+=n,e.map[i][2].push(...r);return}i+=1}e.map.push([t,n,r])}}function e5(e,t){let n=!1;const r=[];for(;t<e.length;){const i=e[t];if(n){if(i[0]==="enter")i[1].type==="tableContent"&&r.push(e[t+1][1].type==="tableDelimiterMarker"?"left":"none");else if(i[1].type==="tableContent"){if(e[t-1][1].type==="tableDelimiterMarker"){const o=r.length-1;r[o]=r[o]==="left"?"center":"right"}}else if(i[1].type==="tableDelimiterRow")break}else i[0]==="enter"&&i[1].type==="tableDelimiterRow"&&(n=!0);t+=1}return r}function t5(){return{flow:{null:{name:"table",tokenize:n5,resolveAll:r5}}}}function n5(e,t,n){const r=this;let i=0,o=0,a;return l;function l(k){let $=r.events.length-1;for(;$>-1;){const N=r.events[$][1].type;if(N==="lineEnding"||N==="linePrefix")$--;else break}const R=$>-1?r.events[$][1].type:null,M=R==="tableHead"||R==="tableRow"?x:s;return M===x&&r.parser.lazy[r.now().line]?n(k):M(k)}function s(k){return e.enter("tableHead"),e.enter("tableRow"),u(k)}function u(k){return k===124||(a=!0,o+=1),c(k)}function c(k){return k===null?n(k):fe(k)?o>1?(o=0,r.interrupt=!0,e.exit("tableRow"),e.enter("lineEnding"),e.consume(k),e.exit("lineEnding"),h):n(k):je(k)?Le(e,c,"whitespace")(k):(o+=1,a&&(a=!1,i+=1),k===124?(e.enter("tableCellDivider"),e.consume(k),e.exit("tableCellDivider"),a=!0,c):(e.enter("data"),f(k)))}function f(k){return k===null||k===124||dt(k)?(e.exit("data"),c(k)):(e.consume(k),k===92?d:f)}function d(k){return k===92||k===124?(e.consume(k),f):f(k)}function h(k){return r.interrupt=!1,r.parser.lazy[r.now().line]?n(k):(e.enter("tableDelimiterRow"),a=!1,je(k)?Le(e,g,"linePrefix",r.parser.constructs.disable.null.includes("codeIndented")?void 0:4)(k):g(k))}function g(k){return k===45||k===58?w(k):k===124?(a=!0,e.enter("tableCellDivider"),e.consume(k),e.exit("tableCellDivider"),y):C(k)}function y(k){return je(k)?Le(e,w,"whitespace")(k):w(k)}function w(k){return k===58?(o+=1,a=!0,e.enter("tableDelimiterMarker"),e.consume(k),e.exit("tableDelimiterMarker"),v):k===45?(o+=1,v(k)):k===null||fe(k)?S(k):C(k)}function v(k){return 
k===45?(e.enter("tableDelimiterFiller"),p(k)):C(k)}function p(k){return k===45?(e.consume(k),p):k===58?(a=!0,e.exit("tableDelimiterFiller"),e.enter("tableDelimiterMarker"),e.consume(k),e.exit("tableDelimiterMarker"),b):(e.exit("tableDelimiterFiller"),b(k))}function b(k){return je(k)?Le(e,S,"whitespace")(k):S(k)}function S(k){return k===124?g(k):k===null||fe(k)?!a||i!==o?C(k):(e.exit("tableDelimiterRow"),e.exit("tableHead"),t(k)):C(k)}function C(k){return n(k)}function x(k){return e.enter("tableRow"),_(k)}function _(k){return k===124?(e.enter("tableCellDivider"),e.consume(k),e.exit("tableCellDivider"),_):k===null||fe(k)?(e.exit("tableRow"),t(k)):je(k)?Le(e,_,"whitespace")(k):(e.enter("data"),I(k))}function I(k){return k===null||k===124||dt(k)?(e.exit("data"),_(k)):(e.consume(k),k===92?T:I)}function T(k){return k===92||k===124?(e.consume(k),I):I(k)}}function r5(e,t){let n=-1,r=!0,i=0,o=[0,0,0,0],a=[0,0,0,0],l=!1,s=0,u,c,f;const d=new Z3;for(;++n<e.length;){const h=e[n],g=h[1];h[0]==="enter"?g.type==="tableHead"?(l=!1,s!==0&&(u1(d,t,s,u,c),c=void 0,s=0),u={type:"table",start:Object.assign({},g.start),end:Object.assign({},g.end)},d.add(n,0,[["enter",u,t]])):g.type==="tableRow"||g.type==="tableDelimiterRow"?(r=!0,f=void 0,o=[0,0,0,0],a=[0,n+1,0,0],l&&(l=!1,c={type:"tableBody",start:Object.assign({},g.start),end:Object.assign({},g.end)},d.add(n,0,[["enter",c,t]])),i=g.type==="tableDelimiterRow"?2:c?3:1):i&&(g.type==="data"||g.type==="tableDelimiterMarker"||g.type==="tableDelimiterFiller")?(r=!1,a[2]===0&&(o[1]!==0&&(a[0]=a[1],f=iu(d,t,o,i,void 0,f),o=[0,0,0,0]),a[2]=n)):g.type==="tableCellDivider"&&(r?r=!1:(o[1]!==0&&(a[0]=a[1],f=iu(d,t,o,i,void 0,f)),o=a,a=[o[1],n,0,0])):g.type==="tableHead"?(l=!0,s=n):g.type==="tableRow"||g.type==="tableDelimiterRow"?(s=n,o[1]!==0?(a[0]=a[1],f=iu(d,t,o,i,n,f)):a[1]!==0&&(f=iu(d,t,a,i,n,f)),i=0):i&&(g.type==="data"||g.type==="tableDelimiterMarker"||g.type==="tableDelimiterFiller")&&(a[3]=n)}for(s!==0&&u1(d,t,s,u,c),d.consume(t.events),n=-1;++n<t.events.length;){const h=t.events[n];h[0]==="enter"&&h[1].type==="table"&&(h[1]._align=e5(t.events,n))}return e}function iu(e,t,n,r,i,o){const a=r===1?"tableHeader":r===2?"tableDelimiter":"tableData",l="tableContent";n[0]!==0&&(o.end=Object.assign({},Bo(t.events,n[0])),e.add(n[0],0,[["exit",o,t]]));const s=Bo(t.events,n[1]);if(o={type:a,start:Object.assign({},s),end:Object.assign({},s)},e.add(n[1],0,[["enter",o,t]]),n[2]!==0){const u=Bo(t.events,n[2]),c=Bo(t.events,n[3]),f={type:l,start:Object.assign({},u),end:Object.assign({},c)};if(e.add(n[2],0,[["enter",f,t]]),r!==2){const d=t.events[n[2]],h=t.events[n[3]];if(d[1].end=Object.assign({},h[1].end),d[1].type="chunkText",d[1].contentType="text",n[3]>n[2]+1){const g=n[2]+1,y=n[3]-n[2]-1;e.add(g,y,[])}}e.add(n[3]+1,0,[["exit",f,t]])}return i!==void 0&&(o.end=Object.assign({},Bo(t.events,i)),e.add(i,0,[["exit",o,t]]),o=void 0),o}function u1(e,t,n,r,i){const o=[],a=Bo(t.events,n);i&&(i.end=Object.assign({},a),o.push(["exit",i,t])),r.end=Object.assign({},a),o.push(["exit",r,t]),e.add(n+1,0,o)}function Bo(e,t){const n=e[t],r=n[0]==="enter"?"start":"end";return n[1][r]}const i5={name:"tasklistCheck",tokenize:a5};function o5(){return{text:{91:i5}}}function a5(e,t,n){const r=this;return i;function i(s){return r.previous!==null||!r._gfmTasklistFirstContentOfListItem?n(s):(e.enter("taskListCheck"),e.enter("taskListCheckMarker"),e.consume(s),e.exit("taskListCheckMarker"),o)}function o(s){return 
dt(s)?(e.enter("taskListCheckValueUnchecked"),e.consume(s),e.exit("taskListCheckValueUnchecked"),a):s===88||s===120?(e.enter("taskListCheckValueChecked"),e.consume(s),e.exit("taskListCheckValueChecked"),a):n(s)}function a(s){return s===93?(e.enter("taskListCheckMarker"),e.consume(s),e.exit("taskListCheckMarker"),e.exit("taskListCheck"),l):n(s)}function l(s){return fe(s)?t(s):je(s)?e.check({tokenize:l5},t,n)(s):n(s)}}function l5(e,t,n){return Le(e,r,"whitespace");function r(i){return i===null?n(i):t(i)}}function s5(e){return nC([O3(),V3(),Y3(e),t5(),o5()])}const u5={};function c5(e){const t=this,n=e||u5,r=t.data(),i=r.micromarkExtensions||(r.micromarkExtensions=[]),o=r.fromMarkdownExtensions||(r.fromMarkdownExtensions=[]),a=r.toMarkdownExtensions||(r.toMarkdownExtensions=[]);i.push(s5(n)),o.push(T3()),a.push($3(n))}function f5(e){EC(e,[/\r?\n|\r/g,d5])}function d5(){return{type:"break"}}function h5(){return function(e){f5(e)}}const m5="_markdown_3p4mr_1",p5={markdown:m5},vc=({message:e,customClass:t={}})=>te("div",{className:oe(p5.markdown,t),children:te(H4,{children:e||"",remarkPlugins:[c5,rD,h5]})}),XC=e=>te(Ti,{children:e.reports&&Array.isArray(e.reports)&&e.reports.length>0&&te("div",{className:oe(Vr.reportContent,(e==null?void 0:e.isOnlyReport)&&Vr.onlyReportContent),children:e.reports.map(t=>Ot(Ti,{children:[Ot("div",{className:Vr.title,children:[t.title,t.level&&te(Ti,{children:t.level==="low"?te("div",{className:oe(Vr.icon,Vr.success),children:"ไฝŽๅฑ"}):t.level==="medium"?te("div",{className:oe(Vr.icon,Vr.warning),children:"ไธญๅฑ"}):te("div",{className:oe(Vr.icon,Vr.fail),children:"้ซ˜ๅฑ"})})]}),te("div",{className:Vr.has_issues,children:te(vc,{message:t.issues})})]}))})}),c1=e=>{var t;return Ot("div",{className:On.sContainer,children:[e.executiveSummary&&Ot("div",{className:On.summary,children:[te("span",{className:On.title,children:"ๆ‰ง่กŒๆ‘˜่ฆ"}),te(vc,{message:e.executiveSummary,customClass:On.yellowWords})]}),te("div",{className:On.content,children:(t=e==null?void 0:e.statistics)==null?void 0:t.map(n=>Ot("div",{className:On.box,children:[te("div",{className:On.label,children:n.label}),te("div",{style:{color:n.colorClass,fontWeight:600,fontSize:48},children:n.value})]}))})]})},f1=e=>{var n;let t=[];return Array.isArray(e==null?void 0:e.issues)&&(e==null?void 0:e.issues.length)>0&&(t=(n=e.issues)==null?void 0:n.map(r=>({title:r.issue_name,issues:r.issues,level:r.severity}))),!t||t.length===0?Ot("div",{className:On.noResult,children:[te("span",{children:"ๆš‚ๆ— ๆต‹่ฏ•็ป“ๆžœ"}),te(Xp,{className:On.noIcon})]}):te("div",{className:On.sContainer,children:te(XC,{reports:t,isOnlyReport:!0})})},g5=e=>{console.log("props:",e);const t=m.useMemo(()=>Array.isArray(e.tabs)?e.tabs.map(n=>{var r,i,o,a;return(n==null?void 0:n.id)!=="subtab-summary-advice"?{label:Ot("div",{className:On.issuesBox,children:[n.title,te("div",{className:oe(On.issuesCount,((i=(r=n.content)==null?void 0:r.issues)==null?void 0:i.length)&&On.show),children:(a=(o=n.content)==null?void 0:o.issues)==null?void 0:a.length})]}),key:n.title,title:n.title,children:(n==null?void 0:n.id)==="subtab-summary-advice"?te(c1,{...n.content}):te(f1,{...n.content})}:{label:n.title,key:n.title,title:n.title,children:(n==null?void 0:n.id)==="subtab-summary-advice"?te(c1,{...n.content}):te(f1,{...n.content})}}):[],[e]);return t?te("div",{className:On.container,children:te(qp,{type:"card",defaultActiveKey:"1",items:t})}):te(Ti,{})};var $r=(e=>(e.passed="passed",e.warning="warning",e.failed="failed",e))($r||{});const 
v5="_container_t5k03_1",y5="_sContainer_t5k03_6",b5="_subContainer_t5k03_10",w5="_noResult_t5k03_15",x5="_noIcon_t5k03_36",S5="_badge_t5k03_39",C5="_badgeSuccess_t5k03_47",k5="_badgeFailure_t5k03_51",E5="_badgeWarning_t5k03_55",cr={container:v5,sContainer:y5,subContainer:b5,noResult:w5,noIcon:x5,badge:S5,badgeSuccess:C5,badgeFailure:k5,badgeWarning:E5},_5="_container_1kd9e_17",P5="_badge_1kd9e_23",I5="_badgeSuccess_1kd9e_31",T5="_badgeFailure_1kd9e_35",$5="_badgeWarning_1kd9e_39",R5="_screenshots_1kd9e_43",M5="_screenshotWrapper_1kd9e_64",O5="_screenshotTitle_1kd9e_71",N5="_screenshot_1kd9e_43",A5="_left_1kd9e_82",F5="_stepItem_1kd9e_108",L5="_stepNumber_1kd9e_120",z5="_stepDescription_1kd9e_127",D5="_stepTime_1kd9e_131",j5="_activeItem_1kd9e_136",B5="_right_1kd9e_142",H5="_actionsList_1kd9e_165",V5="_actionCard_1kd9e_168",W5="_success_1kd9e_175",U5="_failure_1kd9e_179",K5="_actionHeader_1kd9e_183",q5="_actionTitle_1kd9e_190",G5="_actionResult_1kd9e_193",X5="_actionResultSuccess_1kd9e_197",Q5="_actionResultFailure_1kd9e_200",Y5="_modelIOTitle_1kd9e_203",gt={"diy-scrollbar":"_diy-scrollbar_1kd9e_1",container:_5,badge:P5,badgeSuccess:I5,badgeFailure:T5,badgeWarning:$5,screenshots:R5,screenshotWrapper:M5,screenshotTitle:O5,screenshot:N5,left:A5,stepItem:F5,stepNumber:L5,stepDescription:z5,stepTime:D5,activeItem:j5,right:B5,actionsList:H5,actionCard:V5,success:W5,failure:U5,actionHeader:K5,actionTitle:q5,actionResult:G5,actionResultSuccess:X5,actionResultFailure:Q5,modelIOTitle:Y5},Z5=e=>{var r,i,o;const[t,n]=m.useState((r=e.testStep)==null?void 0:r[0]);return console.log("activeStep:",t),!e.testStep||Array.isArray(e.testStep)&&e.testStep.length===0?te(Ti,{}):Ot("div",{className:gt.container,children:[te("div",{className:gt.left,children:(i=e==null?void 0:e.testStep)==null?void 0:i.map(a=>Ot("div",{className:oe(gt.stepItem,t.id===a.id&&gt.activeItem),onClick:()=>{n(a)},children:[Ot("div",{className:gt.stepNumber,children:["ๆญฅ้ชค ",a.id,te("span",{className:oe(gt.badge,a.status===$r.passed?gt.badgeSuccess:a.status===$r.failed?gt.badgeFailure:a.status===$r.warning?gt.badgeWarning:gt.hide),children:a.status===$r.passed?"้€š่ฟ‡":a.status===$r.failed?"ไธ้€š่ฟ‡":"่ญฆๅ‘Š"})]}),te("div",{className:gt.stepDescription,children:te(vc,{message:a.description})})]}))}),te("div",{className:gt.screenshots,children:(o=t==null?void 0:t.screenshots)==null?void 0:o.map(a=>te("div",{className:gt.screenshotWrapper,children:te("img",{className:gt.screenshot,src:a.data})}))}),(Array.isArray(t.actions)&&t.actions.length>0||t.modelIO)&&Ot("div",{className:gt.right,children:[Array.isArray(t.actions)&&t.actions.length>0&&Ot("div",{className:gt.actionsList,children:[te("div",{className:gt.modelIOTitle,children:"ๆ‰ง่กŒๅŠจไฝœ"}),t.actions.map(a=>te("div",{className:oe(gt.actionCard,gt.success),"data-step":"0","data-case":"0",children:Ot("div",{className:gt.actionHeader,children:[te("div",{className:gt.actionTitle,children:a.index}),te("div",{className:gt.description,children:a.description}),te("div",{className:oe(gt.actionResult,gt.success),children:a.success?"โœ“ ๆˆๅŠŸ":"ร— ๅคฑ่ดฅ"})]})}))]}),t.modelIO&&Ot(Ti,{children:[te("div",{className:gt.modelIOTitle,children:"ๆจกๅž‹่พ“ๅ‡บ"}),te(vc,{message:t.modelIO})]})]})]})},J5=e=>{var t,n;return Ot("div",{className:cr.subContainer,children:[te(XC,{reports:e.report||[],isOnlyReport:Array.isArray(e.steps)&&((t=e.steps)==null?void 0:t.length)==0}),e.steps&&Array.isArray(e.steps)&&((n=e.steps)==null?void 
0:n.length)>0&&te(Z5,{testStep:e.steps})]})},e6=e=>{const[t,n]=m.useState(""),r=m.useMemo(()=>{var i,o;return Array.isArray(e==null?void 0:e.subTest)&&(e==null?void 0:e.subTest.length)>0?(n((o=(i=e==null?void 0:e.subTest)==null?void 0:i[0])==null?void 0:o.name),e==null?void 0:e.subTest.map(a=>({label:Ot("div",{children:[" ",a.name," ",te("span",{className:oe(cr.badge,a.status===$r.passed?cr.badgeSuccess:a.status===$r.failed?cr.badgeFailure:a.status===$r.warning?cr.badgeWarning:cr.hide),children:a.status===$r.passed?"้€š่ฟ‡":a.status===$r.failed?"ไธ้€š่ฟ‡":"่ญฆๅ‘Š"})]}),key:a.name,title:a.name,children:te(J5,{...a})}))):[]},[e]);return!r||r.length===0?Ot("div",{className:cr.noResult,children:[te("span",{children:"ๆš‚ๆ— ๆต‹่ฏ•็ป“ๆžœ"}),te(Xp,{className:cr.noIcon})]}):te("div",{className:cr.sContainer,children:te(qp,{size:"small",type:"card",defaultActiveKey:t,items:r})})},t6=e=>{const[t,n]=m.useState(""),r=m.useMemo(()=>{var i,o;return Array.isArray(e.items)&&((i=e.items)==null?void 0:i.length)>0?(n((o=e.items)==null?void 0:o[0].test_id),e.items.map(a=>({label:a.test_name,key:a.test_id,title:a.test_name,children:te(e6,{subTest:a.sub_tests})}))):[]},[e.items]);return!r||r.length===0?Ot("div",{className:cr.noResult,children:[te("span",{children:"ๆš‚ๆ— ๆต‹่ฏ•็ป“ๆžœ"}),te(Xp,{className:cr.noIcon})]}):te("div",{className:cr.container,children:te(qp,{size:"small",type:"card",defaultActiveKey:t,items:r})})},n6="_container_1tejq_1",r6="_name_1tejq_5",i6={container:n6,name:r6},d1=({title:e,children:t})=>te("div",{className:i6.container,children:t});function o6(){const[e,t]=m.useState([]),[n,r]=m.useState("aggregated_results");m.useEffect(()=>{var a,l,s,u,c;let o=[{name:(l=(a=window==null?void 0:window.testResultData)==null?void 0:a.aggregated_results)==null?void 0:l.title,key:"aggregated_results"}];for(let f in(s=window==null?void 0:window.testResultData)==null?void 0:s.test_results)o.push({name:(c=(u=window==null?void 0:window.testResultData)==null?void 0:u.test_results)==null?void 0:c[f].title,key:f});r("aggregated_results"),t(o)},[]);const i=m.useMemo(()=>{var o,a,l,s,u,c;return n==="aggregated_results"?te(d1,{title:(o=window==null?void 0:window.testResultData)==null?void 0:o.aggregated_results.title,children:te(g5,{...(a=window==null?void 0:window.testResultData)==null?void 0:a.aggregated_results})}):te(d1,{title:(s=(l=window==null?void 0:window.testResultData)==null?void 0:l.test_results)==null?void 0:s[n].title,children:te(t6,{...(c=(u=window==null?void 0:window.testResultData)==null?void 0:u.test_results)==null?void 0:c[n]})})},[n]);return window!=null&&window.testResultData?(console.log("menuList:",e,n),Ot("div",{className:Ws.container,children:[te("div",{className:Ws.left,children:te(jN,{children:Ot(Up,{children:[te("div",{className:Ws.webTitle,children:"่ฏ„ไผฐๆŠฅๅ‘Š"}),te(i0,{mode:"inline",defaultSelectedKeys:["aggregated_results"],children:e&&(e==null?void 0:e.length)>0&&e.map(o=>te(i0.Item,{onClick:()=>{r(o.key)},children:te("span",{className:"nav-text",children:o.name})},o.key))})]})})}),te("div",{className:Ws.right,children:i})]})):te(Ti,{})}Cd.createRoot(document.getElementById("root")).render(te(De.StrictMode,{children:te(h2,{theme:{token:{colorBgLayout:"##F9FAFC",colorPrimary:"#1677ff",colorLink:"#1677ff",controlItemBgActive:"#F4F5F9",controlItemBgActiveHover:"#F4F5F9",borderRadius:2,colorFillTertiary:"#F4F5F9",colorFillSecondary:"#F4F5F9",colorErrorBg:"#F4F5F9",colorErrorBgHover:"#F4F5F9"},components:{Select:{activeOutlineColor:"rgba(0, 0, 0, 
0)"},Layout:{siderBg:"##F9FAFC"}}},children:te(o6,{})})}));
 
  `))+1))}const a="#".repeat(i),l=n.enter("headingAtx"),s=n.enter("phrasing");o.move(a+" ");let u=n.containerPhrasing(e,{before:"# ",after:`
  `,...o.current()});return/^[\t ]/.test(u)&&(u=es(u.charCodeAt(0))+u.slice(1)),u=u?a+" "+u:a,n.options.closeAtx&&(u+=" "+a),s(),l(),u}RC.peek=JD;function RC(e){return e.value||""}function JD(){return"<"}MC.peek=e3;function MC(e,t,n,r){const i=dg(n),o=i==='"'?"Quote":"Apostrophe",a=n.enter("image");let l=n.enter("label");const s=n.createTracker(r);let u=s.move("![");return u+=s.move(n.safe(e.alt,{before:u,after:"]",...s.current()})),u+=s.move("]("),l(),!e.url&&e.title||/[\0- \u007F]/.test(e.url)?(l=n.enter("destinationLiteral"),u+=s.move("<"),u+=s.move(n.safe(e.url,{before:u,after:">",...s.current()})),u+=s.move(">")):(l=n.enter("destinationRaw"),u+=s.move(n.safe(e.url,{before:u,after:e.title?" ":")",...s.current()}))),l(),e.title&&(l=n.enter(`title${o}`),u+=s.move(" "+i),u+=s.move(n.safe(e.title,{before:u,after:i,...s.current()})),u+=s.move(i),l()),u+=s.move(")"),a(),u}function e3(){return"!"}OC.peek=t3;function OC(e,t,n,r){const i=e.referenceType,o=n.enter("imageReference");let a=n.enter("label");const l=n.createTracker(r);let s=l.move("![");const u=n.safe(e.alt,{before:s,after:"]",...l.current()});s+=l.move(u+"]["),a();const c=n.stack;n.stack=[],a=n.enter("reference");const f=n.safe(n.associationId(e),{before:s,after:"]",...l.current()});return a(),n.stack=c,o(),i==="full"||!u||u!==f?s+=l.move(f+"]"):i==="shortcut"?s=s.slice(0,-1):s+=l.move("]"),s}function t3(){return"!"}NC.peek=n3;function NC(e,t,n){let r=e.value||"",i="`",o=-1;for(;new RegExp("(^|[^`])"+i+"([^`]|$)").test(r);)i+="`";for(/[^ \r\n]/.test(r)&&(/^[ \r\n]/.test(r)&&/[ \r\n]$/.test(r)||/^`|`$/.test(r))&&(r=" "+r+" ");++o<n.unsafe.length;){const a=n.unsafe[o],l=n.compilePattern(a);let s;if(a.atBreak)for(;s=l.exec(r);){let u=s.index;r.charCodeAt(u)===10&&r.charCodeAt(u-1)===13&&u--,r=r.slice(0,u)+" "+r.slice(s.index+1)}}return i+r+i}function n3(){return"`"}function AC(e,t){const n=rg(e);return!!(!t.options.resourceLink&&e.url&&!e.title&&e.children&&e.children.length===1&&e.children[0].type==="text"&&(n===e.url||"mailto:"+n===e.url)&&/^[a-z][a-z+.-]+:/i.test(e.url)&&!/[\0- <>\u007F]/.test(e.url))}FC.peek=r3;function FC(e,t,n,r){const i=dg(n),o=i==='"'?"Quote":"Apostrophe",a=n.createTracker(r);let l,s;if(AC(e,n)){const c=n.stack;n.stack=[],l=n.enter("autolink");let f=a.move("<");return f+=a.move(n.containerPhrasing(e,{before:f,after:">",...a.current()})),f+=a.move(">"),l(),n.stack=c,f}l=n.enter("link"),s=n.enter("label");let u=a.move("[");return u+=a.move(n.containerPhrasing(e,{before:u,after:"](",...a.current()})),u+=a.move("]("),s(),!e.url&&e.title||/[\0- \u007F]/.test(e.url)?(s=n.enter("destinationLiteral"),u+=a.move("<"),u+=a.move(n.safe(e.url,{before:u,after:">",...a.current()})),u+=a.move(">")):(s=n.enter("destinationRaw"),u+=a.move(n.safe(e.url,{before:u,after:e.title?" 
":")",...a.current()}))),s(),e.title&&(s=n.enter(`title${o}`),u+=a.move(" "+i),u+=a.move(n.safe(e.title,{before:u,after:i,...a.current()})),u+=a.move(i),s()),u+=a.move(")"),l(),u}function r3(e,t,n){return AC(e,n)?"<":"["}LC.peek=i3;function LC(e,t,n,r){const i=e.referenceType,o=n.enter("linkReference");let a=n.enter("label");const l=n.createTracker(r);let s=l.move("[");const u=n.containerPhrasing(e,{before:s,after:"]",...l.current()});s+=l.move(u+"]["),a();const c=n.stack;n.stack=[],a=n.enter("reference");const f=n.safe(n.associationId(e),{before:s,after:"]",...l.current()});return a(),n.stack=c,o(),i==="full"||!u||u!==f?s+=l.move(f+"]"):i==="shortcut"?s=s.slice(0,-1):s+=l.move("]"),s}function i3(){return"["}function hg(e){const t=e.options.bullet||"*";if(t!=="*"&&t!=="+"&&t!=="-")throw new Error("Cannot serialize items with `"+t+"` for `options.bullet`, expected `*`, `+`, or `-`");return t}function o3(e){const t=hg(e),n=e.options.bulletOther;if(!n)return t==="*"?"-":"*";if(n!=="*"&&n!=="+"&&n!=="-")throw new Error("Cannot serialize items with `"+n+"` for `options.bulletOther`, expected `*`, `+`, or `-`");if(n===t)throw new Error("Expected `bullet` (`"+t+"`) and `bulletOther` (`"+n+"`) to be different");return n}function a3(e){const t=e.options.bulletOrdered||".";if(t!=="."&&t!==")")throw new Error("Cannot serialize items with `"+t+"` for `options.bulletOrdered`, expected `.` or `)`");return t}function zC(e){const t=e.options.rule||"*";if(t!=="*"&&t!=="-"&&t!=="_")throw new Error("Cannot serialize rules with `"+t+"` for `options.rule`, expected `*`, `-`, or `_`");return t}function l3(e,t,n,r){const i=n.enter("list"),o=n.bulletCurrent;let a=e.ordered?a3(n):hg(n);const l=e.ordered?a==="."?")":".":o3(n);let s=t&&n.bulletLastUsed?a===n.bulletLastUsed:!1;if(!e.ordered){const c=e.children?e.children[0]:void 0;if((a==="*"||a==="-")&&c&&(!c.children||!c.children[0])&&n.stack[n.stack.length-1]==="list"&&n.stack[n.stack.length-2]==="listItem"&&n.stack[n.stack.length-3]==="list"&&n.stack[n.stack.length-4]==="listItem"&&n.indexStack[n.indexStack.length-1]===0&&n.indexStack[n.indexStack.length-2]===0&&n.indexStack[n.indexStack.length-3]===0&&(s=!0),zC(n)===a&&c){let f=-1;for(;++f<e.children.length;){const d=e.children[f];if(d&&d.type==="listItem"&&d.children&&d.children[0]&&d.children[0].type==="thematicBreak"){s=!0;break}}}}s&&(a=l),n.bulletCurrent=a;const u=n.containerFlow(e,r);return n.bulletLastUsed=a,n.bulletCurrent=o,i(),u}function s3(e){const t=e.options.listItemIndent||"one";if(t!=="tab"&&t!=="one"&&t!=="mixed")throw new Error("Cannot serialize items with `"+t+"` for `options.listItemIndent`, expected `tab`, `one`, or `mixed`");return t}function u3(e,t,n,r){const i=s3(n);let o=n.bulletCurrent||hg(n);t&&t.type==="list"&&t.ordered&&(o=(typeof t.start=="number"&&t.start>-1?t.start:1)+(n.options.incrementListMarker===!1?0:t.children.indexOf(e))+o);let a=o.length+1;(i==="tab"||i==="mixed"&&(t&&t.type==="list"&&t.spread||e.spread))&&(a=Math.ceil(a/4)*4);const l=n.createTracker(r);l.move(o+" ".repeat(a-o.length)),l.shift(a);const s=n.enter("listItem"),u=n.indentLines(n.containerFlow(e,l.current()),c);return s(),u;function c(f,d,h){return d?(h?"":" ".repeat(a))+f:(h?o:o+" ".repeat(a-o.length))+f}}function c3(e,t,n,r){const i=n.enter("paragraph"),o=n.enter("phrasing"),a=n.containerPhrasing(e,r);return o(),i(),a}const 
f3=mf(["break","delete","emphasis","footnote","footnoteReference","image","imageReference","inlineCode","inlineMath","link","linkReference","mdxJsxTextElement","mdxTextExpression","strong","text","textDirective"]);function d3(e,t,n,r){return(e.children.some(function(a){return f3(a)})?n.containerPhrasing:n.containerFlow).call(n,e,r)}function h3(e){const t=e.options.strong||"*";if(t!=="*"&&t!=="_")throw new Error("Cannot serialize strong with `"+t+"` for `options.strong`, expected `*`, or `_`");return t}DC.peek=m3;function DC(e,t,n,r){const i=h3(n),o=n.enter("strong"),a=n.createTracker(r),l=a.move(i+i);let s=a.move(n.containerPhrasing(e,{after:i,before:l,...a.current()}));const u=s.charCodeAt(0),c=gc(r.before.charCodeAt(r.before.length-1),u,i);c.inside&&(s=es(u)+s.slice(1));const f=s.charCodeAt(s.length-1),d=gc(r.after.charCodeAt(0),f,i);d.inside&&(s=s.slice(0,-1)+es(f));const h=a.move(i+i);return o(),n.attentionEncodeSurroundingInfo={after:d.outside,before:c.outside},l+s+h}function m3(e,t,n){return n.options.strong||"*"}function p3(e,t,n,r){return n.safe(e.value,r)}function g3(e){const t=e.options.ruleRepetition||3;if(t<3)throw new Error("Cannot serialize rules with repetition `"+t+"` for `options.ruleRepetition`, expected `3` or more");return t}function v3(e,t,n){const r=(zC(n)+(n.options.ruleSpaces?" ":"")).repeat(g3(n));return n.options.ruleSpaces?r.slice(0,-1):r}const jC={blockquote:BD,break:a1,code:KD,definition:GD,emphasis:$C,hardBreak:a1,heading:ZD,html:RC,image:MC,imageReference:OC,inlineCode:NC,link:FC,linkReference:LC,list:l3,listItem:u3,paragraph:c3,root:d3,strong:DC,text:p3,thematicBreak:v3};function y3(){return{enter:{table:b3,tableData:l1,tableHeader:l1,tableRow:x3},exit:{codeText:S3,table:w3,tableData:Sd,tableHeader:Sd,tableRow:Sd}}}function b3(e){const t=e._align;this.enter({type:"table",align:t.map(function(n){return n==="none"?null:n}),children:[]},e),this.data.inTable=!0}function w3(e){this.exit(e),this.data.inTable=void 0}function x3(e){this.enter({type:"tableRow",children:[]},e)}function Sd(e){this.exit(e)}function l1(e){this.enter({type:"tableCell",children:[]},e)}function S3(e){let t=this.resume();this.data.inTable&&(t=t.replace(/\\([\\|])/g,C3));const n=this.stack[this.stack.length-1];n.type,n.value=t,this.exit(e)}function C3(e,t){return t==="|"?t:e}function k3(e){const t=e||{},n=t.tableCellPadding,r=t.tablePipeAlign,i=t.stringLength,o=n?" ":"|";return{unsafe:[{character:"\r",inConstruct:"tableCell"},{character:`
  `,inConstruct:"tableCell"},{atBreak:!0,character:"|",after:"[ :-]"},{character:"|",inConstruct:"tableCell"},{atBreak:!0,character:":",after:"-"},{atBreak:!0,character:"-",after:"[:|-]"}],handlers:{inlineCode:d,table:a,tableCell:s,tableRow:l}};function a(h,g,y,w){return u(c(h,y,w),h.align)}function l(h,g,y,w){const v=f(h,y,w),p=u([v]);return p.slice(0,p.indexOf(`
+ `))}function s(h,g,y,w){const v=y.enter("tableCell"),p=y.enter("phrasing"),b=y.containerPhrasing(h,{...w,before:o,after:o});return p(),v(),b}function u(h,g){return DD(h,{align:g,alignDelimiters:r,padding:n,stringLength:i})}function c(h,g,y){const w=h.children;let v=-1;const p=[],b=g.enter("table");for(;++v<w.length;)p[v]=f(w[v],g,y);return b(),p}function f(h,g,y){const w=h.children;let v=-1;const p=[],b=g.enter("tableRow");for(;++v<w.length;)p[v]=s(w[v],h,g,y);return b(),p}function d(h,g,y){let w=jC.inlineCode(h,g,y);return y.stack.includes("tableCell")&&(w=w.replace(/\|/g,"\\$&")),w}}function E3(){return{exit:{taskListCheckValueChecked:s1,taskListCheckValueUnchecked:s1,paragraph:P3}}}function _3(){return{unsafe:[{atBreak:!0,character:"-",after:"[:|-]"}],handlers:{listItem:I3}}}function s1(e){const t=this.stack[this.stack.length-2];t.type,t.checked=e.type==="taskListCheckValueChecked"}function P3(e){const t=this.stack[this.stack.length-2];if(t&&t.type==="listItem"&&typeof t.checked=="boolean"){const n=this.stack[this.stack.length-1];n.type;const r=n.children[0];if(r&&r.type==="text"){const i=t.children;let o=-1,a;for(;++o<i.length;){const l=i[o];if(l.type==="paragraph"){a=l;break}}a===n&&(r.value=r.value.slice(1),r.value.length===0?n.children.shift():n.position&&r.position&&typeof r.position.start.offset=="number"&&(r.position.start.column++,r.position.start.offset++,n.position.start=Object.assign({},r.position.start)))}}this.exit(e)}function I3(e,t,n,r){const i=e.children[0],o=typeof e.checked=="boolean"&&i&&i.type==="paragraph",a="["+(e.checked?"x":" ")+"] ",l=n.createTracker(r);o&&l.move(a);let s=jC.listItem(e,t,n,{...r,...l.current()});return o&&(s=s.replace(/^(?:[*+-]|\d+\.)([\r\n]| {1,3})/,u)),s;function u(c){return c+a}}function T3(){return[sD(),TD(),OD(),y3(),E3()]}function $3(e){return{extensions:[uD(),$D(e),ND(),k3(e),_3()]}}const R3={tokenize:L3,partial:!0},BC={tokenize:z3,partial:!0},HC={tokenize:D3,partial:!0},VC={tokenize:j3,partial:!0},M3={tokenize:B3,partial:!0},WC={name:"wwwAutolink",tokenize:A3,previous:KC},UC={name:"protocolAutolink",tokenize:F3,previous:qC},di={name:"emailAutolink",tokenize:N3,previous:GC},Yr={};function O3(){return{text:Yr}}let to=48;for(;to<123;)Yr[to]=di,to++,to===58?to=65:to===91&&(to=97);Yr[43]=di;Yr[45]=di;Yr[46]=di;Yr[95]=di;Yr[72]=[di,UC];Yr[104]=[di,UC];Yr[87]=[di,WC];Yr[119]=[di,WC];function N3(e,t,n){const r=this;let i,o;return a;function a(f){return!km(f)||!GC.call(r,r.previous)||mg(r.events)?n(f):(e.enter("literalAutolink"),e.enter("literalAutolinkEmail"),l(f))}function l(f){return km(f)?(e.consume(f),l):f===64?(e.consume(f),s):n(f)}function s(f){return f===46?e.check(M3,c,u)(f):f===45||f===95||vn(f)?(o=!0,e.consume(f),s):c(f)}function u(f){return e.consume(f),i=!0,s}function c(f){return o&&i&&Cn(r.previous)?(e.exit("literalAutolinkEmail"),e.exit("literalAutolink"),t(f)):n(f)}}function A3(e,t,n){const r=this;return i;function i(a){return a!==87&&a!==119||!KC.call(r,r.previous)||mg(r.events)?n(a):(e.enter("literalAutolink"),e.enter("literalAutolinkWww"),e.check(R3,e.attempt(BC,e.attempt(HC,o),n),n)(a))}function o(a){return e.exit("literalAutolinkWww"),e.exit("literalAutolink"),t(a)}}function F3(e,t,n){const r=this;let i="",o=!1;return a;function a(f){return(f===72||f===104)&&qC.call(r,r.previous)&&!mg(r.events)?(e.enter("literalAutolink"),e.enter("literalAutolinkHttp"),i+=String.fromCodePoint(f),e.consume(f),l):n(f)}function l(f){if(Cn(f)&&i.length<5)return i+=String.fromCodePoint(f),e.consume(f),l;if(f===58){const 
d=i.toLowerCase();if(d==="http"||d==="https")return e.consume(f),s}return n(f)}function s(f){return f===47?(e.consume(f),o?u:(o=!0,s)):n(f)}function u(f){return f===null||hc(f)||dt(f)||xo(f)||ff(f)?n(f):e.attempt(BC,e.attempt(HC,c),n)(f)}function c(f){return e.exit("literalAutolinkHttp"),e.exit("literalAutolink"),t(f)}}function L3(e,t,n){let r=0;return i;function i(a){return(a===87||a===119)&&r<3?(r++,e.consume(a),i):a===46&&r===3?(e.consume(a),o):n(a)}function o(a){return a===null?n(a):t(a)}}function z3(e,t,n){let r,i,o;return a;function a(u){return u===46||u===95?e.check(VC,s,l)(u):u===null||dt(u)||xo(u)||u!==45&&ff(u)?s(u):(o=!0,e.consume(u),a)}function l(u){return u===95?r=!0:(i=r,r=void 0),e.consume(u),a}function s(u){return i||r||!o?n(u):t(u)}}function D3(e,t){let n=0,r=0;return i;function i(a){return a===40?(n++,e.consume(a),i):a===41&&r<n?o(a):a===33||a===34||a===38||a===39||a===41||a===42||a===44||a===46||a===58||a===59||a===60||a===63||a===93||a===95||a===126?e.check(VC,t,o)(a):a===null||dt(a)||xo(a)?t(a):(e.consume(a),i)}function o(a){return a===41&&r++,e.consume(a),i}}function j3(e,t,n){return r;function r(l){return l===33||l===34||l===39||l===41||l===42||l===44||l===46||l===58||l===59||l===63||l===95||l===126?(e.consume(l),r):l===38?(e.consume(l),o):l===93?(e.consume(l),i):l===60||l===null||dt(l)||xo(l)?t(l):n(l)}function i(l){return l===null||l===40||l===91||dt(l)||xo(l)?t(l):r(l)}function o(l){return Cn(l)?a(l):n(l)}function a(l){return l===59?(e.consume(l),r):Cn(l)?(e.consume(l),a):n(l)}}function B3(e,t,n){return r;function r(o){return e.consume(o),i}function i(o){return vn(o)?n(o):t(o)}}function KC(e){return e===null||e===40||e===42||e===95||e===91||e===93||e===126||dt(e)}function qC(e){return!Cn(e)}function GC(e){return!(e===47||km(e))}function km(e){return e===43||e===45||e===46||e===95||vn(e)}function mg(e){let t=e.length,n=!1;for(;t--;){const r=e[t][1];if((r.type==="labelLink"||r.type==="labelImage")&&!r._balanced){n=!0;break}if(r._gfmAutolinkLiteralWalkedInto){n=!1;break}}return e.length>0&&!n&&(e[e.length-1][1]._gfmAutolinkLiteralWalkedInto=!0),n}const H3={tokenize:Q3,partial:!0};function V3(){return{document:{91:{name:"gfmFootnoteDefinition",tokenize:q3,continuation:{tokenize:G3},exit:X3}},text:{91:{name:"gfmFootnoteCall",tokenize:K3},93:{name:"gfmPotentialFootnoteCall",add:"after",tokenize:W3,resolveTo:U3}}}}function W3(e,t,n){const r=this;let i=r.events.length;const o=r.parser.gfmFootnotes||(r.parser.gfmFootnotes=[]);let a;for(;i--;){const s=r.events[i][1];if(s.type==="labelImage"){a=s;break}if(s.type==="gfmFootnoteCall"||s.type==="labelLink"||s.type==="label"||s.type==="image"||s.type==="link")break}return l;function l(s){if(!a||!a._balanced)return n(s);const u=Ar(r.sliceSerialize({start:a.end,end:r.now()}));return u.codePointAt(0)!==94||!o.includes(u.slice(1))?n(s):(e.enter("gfmFootnoteCallLabelMarker"),e.consume(s),e.exit("gfmFootnoteCallLabelMarker"),t(s))}}function U3(e,t){let n=e.length;for(;n--;)if(e[n][1].type==="labelImage"&&e[n][0]==="enter"){e[n][1];break}e[n+1][1].type="data",e[n+3][1].type="gfmFootnoteCallLabelMarker";const r={type:"gfmFootnoteCall",start:Object.assign({},e[n+3][1].start),end:Object.assign({},e[e.length-1][1].end)},i={type:"gfmFootnoteCallMarker",start:Object.assign({},e[n+3][1].end),end:Object.assign({},e[n+3][1].end)};i.end.column++,i.end.offset++,i.end._bufferIndex++;const 
o={type:"gfmFootnoteCallString",start:Object.assign({},i.end),end:Object.assign({},e[e.length-1][1].start)},a={type:"chunkString",contentType:"string",start:Object.assign({},o.start),end:Object.assign({},o.end)},l=[e[n+1],e[n+2],["enter",r,t],e[n+3],e[n+4],["enter",i,t],["exit",i,t],["enter",o,t],["enter",a,t],["exit",a,t],["exit",o,t],e[e.length-2],e[e.length-1],["exit",r,t]];return e.splice(n,e.length-n+1,...l),e}function K3(e,t,n){const r=this,i=r.parser.gfmFootnotes||(r.parser.gfmFootnotes=[]);let o=0,a;return l;function l(f){return e.enter("gfmFootnoteCall"),e.enter("gfmFootnoteCallLabelMarker"),e.consume(f),e.exit("gfmFootnoteCallLabelMarker"),s}function s(f){return f!==94?n(f):(e.enter("gfmFootnoteCallMarker"),e.consume(f),e.exit("gfmFootnoteCallMarker"),e.enter("gfmFootnoteCallString"),e.enter("chunkString").contentType="string",u)}function u(f){if(o>999||f===93&&!a||f===null||f===91||dt(f))return n(f);if(f===93){e.exit("chunkString");const d=e.exit("gfmFootnoteCallString");return i.includes(Ar(r.sliceSerialize(d)))?(e.enter("gfmFootnoteCallLabelMarker"),e.consume(f),e.exit("gfmFootnoteCallLabelMarker"),e.exit("gfmFootnoteCall"),t):n(f)}return dt(f)||(a=!0),o++,e.consume(f),f===92?c:u}function c(f){return f===91||f===92||f===93?(e.consume(f),o++,u):u(f)}}function q3(e,t,n){const r=this,i=r.parser.gfmFootnotes||(r.parser.gfmFootnotes=[]);let o,a=0,l;return s;function s(g){return e.enter("gfmFootnoteDefinition")._container=!0,e.enter("gfmFootnoteDefinitionLabel"),e.enter("gfmFootnoteDefinitionLabelMarker"),e.consume(g),e.exit("gfmFootnoteDefinitionLabelMarker"),u}function u(g){return g===94?(e.enter("gfmFootnoteDefinitionMarker"),e.consume(g),e.exit("gfmFootnoteDefinitionMarker"),e.enter("gfmFootnoteDefinitionLabelString"),e.enter("chunkString").contentType="string",c):n(g)}function c(g){if(a>999||g===93&&!l||g===null||g===91||dt(g))return n(g);if(g===93){e.exit("chunkString");const y=e.exit("gfmFootnoteDefinitionLabelString");return o=Ar(r.sliceSerialize(y)),e.enter("gfmFootnoteDefinitionLabelMarker"),e.consume(g),e.exit("gfmFootnoteDefinitionLabelMarker"),e.exit("gfmFootnoteDefinitionLabel"),d}return dt(g)||(l=!0),a++,e.consume(g),g===92?f:c}function f(g){return g===91||g===92||g===93?(e.consume(g),a++,c):c(g)}function d(g){return g===58?(e.enter("definitionMarker"),e.consume(g),e.exit("definitionMarker"),i.includes(o)||i.push(o),Le(e,h,"gfmFootnoteDefinitionWhitespace")):n(g)}function h(g){return t(g)}}function G3(e,t,n){return e.check(ws,t,e.attempt(H3,t,n))}function X3(e){e.exit("gfmFootnoteDefinition")}function Q3(e,t,n){const r=this;return Le(e,i,"gfmFootnoteDefinitionIndent",4+1);function i(o){const a=r.events[r.events.length-1];return a&&a[1].type==="gfmFootnoteDefinitionIndent"&&a[2].sliceSerialize(a[1],!0).length===4?t(o):n(o)}}function Y3(e){let n=(e||{}).singleTilde;const r={name:"strikethrough",tokenize:o,resolveAll:i};return n==null&&(n=!0),{text:{126:r},insideSpan:{null:[r]},attentionMarkers:{null:[126]}};function i(a,l){let s=-1;for(;++s<a.length;)if(a[s][0]==="enter"&&a[s][1].type==="strikethroughSequenceTemporary"&&a[s][1]._close){let u=s;for(;u--;)if(a[u][0]==="exit"&&a[u][1].type==="strikethroughSequenceTemporary"&&a[u][1]._open&&a[s][1].end.offset-a[s][1].start.offset===a[u][1].end.offset-a[u][1].start.offset){a[s][1].type="strikethroughSequence",a[u][1].type="strikethroughSequence";const 
c={type:"strikethrough",start:Object.assign({},a[u][1].start),end:Object.assign({},a[s][1].end)},f={type:"strikethroughText",start:Object.assign({},a[u][1].end),end:Object.assign({},a[s][1].start)},d=[["enter",c,l],["enter",a[u][1],l],["exit",a[u][1],l],["enter",f,l]],h=l.parser.constructs.insideSpan.null;h&&Qn(d,d.length,0,df(h,a.slice(u+1,s),l)),Qn(d,d.length,0,[["exit",f,l],["enter",a[s][1],l],["exit",a[s][1],l],["exit",c,l]]),Qn(a,u-1,s-u+3,d),s=u+d.length-2;break}}for(s=-1;++s<a.length;)a[s][1].type==="strikethroughSequenceTemporary"&&(a[s][1].type="data");return a}function o(a,l,s){const u=this.previous,c=this.events;let f=0;return d;function d(g){return u===126&&c[c.length-1][1].type!=="characterEscape"?s(g):(a.enter("strikethroughSequenceTemporary"),h(g))}function h(g){const y=ka(u);if(g===126)return f>1?s(g):(a.consume(g),f++,h);if(f<2&&!n)return s(g);const w=a.exit("strikethroughSequenceTemporary"),v=ka(g);return w._open=!v||v===2&&!!y,w._close=!y||y===2&&!!v,l(g)}}}class Z3{constructor(){this.map=[]}add(t,n,r){J3(this,t,n,r)}consume(t){if(this.map.sort(function(o,a){return o[0]-a[0]}),this.map.length===0)return;let n=this.map.length;const r=[];for(;n>0;)n-=1,r.push(t.slice(this.map[n][0]+this.map[n][1]),this.map[n][2]),t.length=this.map[n][0];r.push(t.slice()),t.length=0;let i=r.pop();for(;i;){for(const o of i)t.push(o);i=r.pop()}this.map.length=0}}function J3(e,t,n,r){let i=0;if(!(n===0&&r.length===0)){for(;i<e.map.length;){if(e.map[i][0]===t){e.map[i][1]+=n,e.map[i][2].push(...r);return}i+=1}e.map.push([t,n,r])}}function e5(e,t){let n=!1;const r=[];for(;t<e.length;){const i=e[t];if(n){if(i[0]==="enter")i[1].type==="tableContent"&&r.push(e[t+1][1].type==="tableDelimiterMarker"?"left":"none");else if(i[1].type==="tableContent"){if(e[t-1][1].type==="tableDelimiterMarker"){const o=r.length-1;r[o]=r[o]==="left"?"center":"right"}}else if(i[1].type==="tableDelimiterRow")break}else i[0]==="enter"&&i[1].type==="tableDelimiterRow"&&(n=!0);t+=1}return r}function t5(){return{flow:{null:{name:"table",tokenize:n5,resolveAll:r5}}}}function n5(e,t,n){const r=this;let i=0,o=0,a;return l;function l(k){let $=r.events.length-1;for(;$>-1;){const N=r.events[$][1].type;if(N==="lineEnding"||N==="linePrefix")$--;else break}const R=$>-1?r.events[$][1].type:null,M=R==="tableHead"||R==="tableRow"?x:s;return M===x&&r.parser.lazy[r.now().line]?n(k):M(k)}function s(k){return e.enter("tableHead"),e.enter("tableRow"),u(k)}function u(k){return k===124||(a=!0,o+=1),c(k)}function c(k){return k===null?n(k):fe(k)?o>1?(o=0,r.interrupt=!0,e.exit("tableRow"),e.enter("lineEnding"),e.consume(k),e.exit("lineEnding"),h):n(k):je(k)?Le(e,c,"whitespace")(k):(o+=1,a&&(a=!1,i+=1),k===124?(e.enter("tableCellDivider"),e.consume(k),e.exit("tableCellDivider"),a=!0,c):(e.enter("data"),f(k)))}function f(k){return k===null||k===124||dt(k)?(e.exit("data"),c(k)):(e.consume(k),k===92?d:f)}function d(k){return k===92||k===124?(e.consume(k),f):f(k)}function h(k){return r.interrupt=!1,r.parser.lazy[r.now().line]?n(k):(e.enter("tableDelimiterRow"),a=!1,je(k)?Le(e,g,"linePrefix",r.parser.constructs.disable.null.includes("codeIndented")?void 0:4)(k):g(k))}function g(k){return k===45||k===58?w(k):k===124?(a=!0,e.enter("tableCellDivider"),e.consume(k),e.exit("tableCellDivider"),y):C(k)}function y(k){return je(k)?Le(e,w,"whitespace")(k):w(k)}function w(k){return k===58?(o+=1,a=!0,e.enter("tableDelimiterMarker"),e.consume(k),e.exit("tableDelimiterMarker"),v):k===45?(o+=1,v(k)):k===null||fe(k)?S(k):C(k)}function v(k){return 
k===45?(e.enter("tableDelimiterFiller"),p(k)):C(k)}function p(k){return k===45?(e.consume(k),p):k===58?(a=!0,e.exit("tableDelimiterFiller"),e.enter("tableDelimiterMarker"),e.consume(k),e.exit("tableDelimiterMarker"),b):(e.exit("tableDelimiterFiller"),b(k))}function b(k){return je(k)?Le(e,S,"whitespace")(k):S(k)}function S(k){return k===124?g(k):k===null||fe(k)?!a||i!==o?C(k):(e.exit("tableDelimiterRow"),e.exit("tableHead"),t(k)):C(k)}function C(k){return n(k)}function x(k){return e.enter("tableRow"),_(k)}function _(k){return k===124?(e.enter("tableCellDivider"),e.consume(k),e.exit("tableCellDivider"),_):k===null||fe(k)?(e.exit("tableRow"),t(k)):je(k)?Le(e,_,"whitespace")(k):(e.enter("data"),I(k))}function I(k){return k===null||k===124||dt(k)?(e.exit("data"),_(k)):(e.consume(k),k===92?T:I)}function T(k){return k===92||k===124?(e.consume(k),I):I(k)}}function r5(e,t){let n=-1,r=!0,i=0,o=[0,0,0,0],a=[0,0,0,0],l=!1,s=0,u,c,f;const d=new Z3;for(;++n<e.length;){const h=e[n],g=h[1];h[0]==="enter"?g.type==="tableHead"?(l=!1,s!==0&&(u1(d,t,s,u,c),c=void 0,s=0),u={type:"table",start:Object.assign({},g.start),end:Object.assign({},g.end)},d.add(n,0,[["enter",u,t]])):g.type==="tableRow"||g.type==="tableDelimiterRow"?(r=!0,f=void 0,o=[0,0,0,0],a=[0,n+1,0,0],l&&(l=!1,c={type:"tableBody",start:Object.assign({},g.start),end:Object.assign({},g.end)},d.add(n,0,[["enter",c,t]])),i=g.type==="tableDelimiterRow"?2:c?3:1):i&&(g.type==="data"||g.type==="tableDelimiterMarker"||g.type==="tableDelimiterFiller")?(r=!1,a[2]===0&&(o[1]!==0&&(a[0]=a[1],f=iu(d,t,o,i,void 0,f),o=[0,0,0,0]),a[2]=n)):g.type==="tableCellDivider"&&(r?r=!1:(o[1]!==0&&(a[0]=a[1],f=iu(d,t,o,i,void 0,f)),o=a,a=[o[1],n,0,0])):g.type==="tableHead"?(l=!0,s=n):g.type==="tableRow"||g.type==="tableDelimiterRow"?(s=n,o[1]!==0?(a[0]=a[1],f=iu(d,t,o,i,n,f)):a[1]!==0&&(f=iu(d,t,a,i,n,f)),i=0):i&&(g.type==="data"||g.type==="tableDelimiterMarker"||g.type==="tableDelimiterFiller")&&(a[3]=n)}for(s!==0&&u1(d,t,s,u,c),d.consume(t.events),n=-1;++n<t.events.length;){const h=t.events[n];h[0]==="enter"&&h[1].type==="table"&&(h[1]._align=e5(t.events,n))}return e}function iu(e,t,n,r,i,o){const a=r===1?"tableHeader":r===2?"tableDelimiter":"tableData",l="tableContent";n[0]!==0&&(o.end=Object.assign({},Bo(t.events,n[0])),e.add(n[0],0,[["exit",o,t]]));const s=Bo(t.events,n[1]);if(o={type:a,start:Object.assign({},s),end:Object.assign({},s)},e.add(n[1],0,[["enter",o,t]]),n[2]!==0){const u=Bo(t.events,n[2]),c=Bo(t.events,n[3]),f={type:l,start:Object.assign({},u),end:Object.assign({},c)};if(e.add(n[2],0,[["enter",f,t]]),r!==2){const d=t.events[n[2]],h=t.events[n[3]];if(d[1].end=Object.assign({},h[1].end),d[1].type="chunkText",d[1].contentType="text",n[3]>n[2]+1){const g=n[2]+1,y=n[3]-n[2]-1;e.add(g,y,[])}}e.add(n[3]+1,0,[["exit",f,t]])}return i!==void 0&&(o.end=Object.assign({},Bo(t.events,i)),e.add(i,0,[["exit",o,t]]),o=void 0),o}function u1(e,t,n,r,i){const o=[],a=Bo(t.events,n);i&&(i.end=Object.assign({},a),o.push(["exit",i,t])),r.end=Object.assign({},a),o.push(["exit",r,t]),e.add(n+1,0,o)}function Bo(e,t){const n=e[t],r=n[0]==="enter"?"start":"end";return n[1][r]}const i5={name:"tasklistCheck",tokenize:a5};function o5(){return{text:{91:i5}}}function a5(e,t,n){const r=this;return i;function i(s){return r.previous!==null||!r._gfmTasklistFirstContentOfListItem?n(s):(e.enter("taskListCheck"),e.enter("taskListCheckMarker"),e.consume(s),e.exit("taskListCheckMarker"),o)}function o(s){return 
dt(s)?(e.enter("taskListCheckValueUnchecked"),e.consume(s),e.exit("taskListCheckValueUnchecked"),a):s===88||s===120?(e.enter("taskListCheckValueChecked"),e.consume(s),e.exit("taskListCheckValueChecked"),a):n(s)}function a(s){return s===93?(e.enter("taskListCheckMarker"),e.consume(s),e.exit("taskListCheckMarker"),e.exit("taskListCheck"),l):n(s)}function l(s){return fe(s)?t(s):je(s)?e.check({tokenize:l5},t,n)(s):n(s)}}function l5(e,t,n){return Le(e,r,"whitespace");function r(i){return i===null?n(i):t(i)}}function s5(e){return nC([O3(),V3(),Y3(e),t5(),o5()])}const u5={};function c5(e){const t=this,n=e||u5,r=t.data(),i=r.micromarkExtensions||(r.micromarkExtensions=[]),o=r.fromMarkdownExtensions||(r.fromMarkdownExtensions=[]),a=r.toMarkdownExtensions||(r.toMarkdownExtensions=[]);i.push(s5(n)),o.push(T3()),a.push($3(n))}function f5(e){EC(e,[/\r?\n|\r/g,d5])}function d5(){return{type:"break"}}function h5(){return function(e){f5(e)}}const m5="_markdown_3p4mr_1",p5={markdown:m5},vc=({message:e,customClass:t={}})=>te("div",{className:oe(p5.markdown,t),children:te(H4,{children:e||"",remarkPlugins:[c5,rD,h5]})}),XC=e=>te(Ti,{children:e.reports&&Array.isArray(e.reports)&&e.reports.length>0&&te("div",{className:oe(Vr.reportContent,(e==null?void 0:e.isOnlyReport)&&Vr.onlyReportContent),children:e.reports.map(t=>Ot(Ti,{children:[Ot("div",{className:Vr.title,children:[t.title,t.level&&te(Ti,{children:t.level==="low"?te("div",{className:oe(Vr.icon,Vr.success),children:"ไฝŽๅฑ"}):t.level==="medium"?te("div",{className:oe(Vr.icon,Vr.warning),children:"ไธญๅฑ"}):te("div",{className:oe(Vr.icon,Vr.fail),children:"้ซ˜ๅฑ"})})]}),te("div",{className:Vr.has_issues,children:te(vc,{message:t.issues})})]}))})}),c1=e=>{var t;return Ot("div",{className:On.sContainer,children:[e.executiveSummary&&Ot("div",{className:On.summary,children:[te("span",{className:On.title,children:"ๆ‰ง่กŒๆ‘˜่ฆ"}),te(vc,{message:e.executiveSummary,customClass:On.yellowWords})]}),te("div",{className:On.content,children:(t=e==null?void 0:e.statistics)==null?void 0:t.map(n=>Ot("div",{className:On.box,children:[te("div",{className:On.label,children:n.label}),te("div",{style:{color:n.colorClass,fontWeight:600,fontSize:48},children:n.value})]}))})]})},f1=e=>{var n;let t=[];return Array.isArray(e==null?void 0:e.issues)&&(e==null?void 0:e.issues.length)>0&&(t=(n=e.issues)==null?void 0:n.map(r=>({title:r.issue_name,issues:r.issues,level:r.severity}))),!t||t.length===0?Ot("div",{className:On.noResult,children:[te("span",{children:"ๆš‚ๆ— ๆต‹่ฏ•็ป“ๆžœ"}),te(Xp,{className:On.noIcon})]}):te("div",{className:On.sContainer,children:te(XC,{reports:t,isOnlyReport:!0})})},g5=e=>{console.log("props:",e);const t=m.useMemo(()=>Array.isArray(e.tabs)?e.tabs.map(n=>{var r,i,o,a;return(n==null?void 0:n.id)!=="subtab-summary-advice"?{label:Ot("div",{className:On.issuesBox,children:[n.title,te("div",{className:oe(On.issuesCount,((i=(r=n.content)==null?void 0:r.issues)==null?void 0:i.length)&&On.show),children:(a=(o=n.content)==null?void 0:o.issues)==null?void 0:a.length})]}),key:n.title,title:n.title,children:(n==null?void 0:n.id)==="subtab-summary-advice"?te(c1,{...n.content}):te(f1,{...n.content})}:{label:n.title,key:n.title,title:n.title,children:(n==null?void 0:n.id)==="subtab-summary-advice"?te(c1,{...n.content}):te(f1,{...n.content})}}):[],[e]);return t?te("div",{className:On.container,children:te(qp,{type:"card",defaultActiveKey:"1",items:t})}):te(Ti,{})};var $r=(e=>(e.passed="passed",e.warning="warning",e.failed="failed",e))($r||{});const 
v5="_container_t5k03_1",y5="_sContainer_t5k03_6",b5="_subContainer_t5k03_10",w5="_noResult_t5k03_15",x5="_noIcon_t5k03_36",S5="_badge_t5k03_39",C5="_badgeSuccess_t5k03_47",k5="_badgeFailure_t5k03_51",E5="_badgeWarning_t5k03_55",cr={container:v5,sContainer:y5,subContainer:b5,noResult:w5,noIcon:x5,badge:S5,badgeSuccess:C5,badgeFailure:k5,badgeWarning:E5},_5="_container_1kd9e_17",P5="_badge_1kd9e_23",I5="_badgeSuccess_1kd9e_31",T5="_badgeFailure_1kd9e_35",$5="_badgeWarning_1kd9e_39",R5="_screenshots_1kd9e_43",M5="_screenshotWrapper_1kd9e_64",O5="_screenshotTitle_1kd9e_71",N5="_screenshot_1kd9e_43",A5="_left_1kd9e_82",F5="_stepItem_1kd9e_108",L5="_stepNumber_1kd9e_120",z5="_stepDescription_1kd9e_127",D5="_stepTime_1kd9e_131",j5="_activeItem_1kd9e_136",B5="_right_1kd9e_142",H5="_actionsList_1kd9e_165",V5="_actionCard_1kd9e_168",W5="_success_1kd9e_175",U5="_failure_1kd9e_179",K5="_actionHeader_1kd9e_183",q5="_actionTitle_1kd9e_190",G5="_actionResult_1kd9e_193",X5="_actionResultSuccess_1kd9e_197",Q5="_actionResultFailure_1kd9e_200",Y5="_modelIOTitle_1kd9e_203",gt={"diy-scrollbar":"_diy-scrollbar_1kd9e_1",container:_5,badge:P5,badgeSuccess:I5,badgeFailure:T5,badgeWarning:$5,screenshots:R5,screenshotWrapper:M5,screenshotTitle:O5,screenshot:N5,left:A5,stepItem:F5,stepNumber:L5,stepDescription:z5,stepTime:D5,activeItem:j5,right:B5,actionsList:H5,actionCard:V5,success:W5,failure:U5,actionHeader:K5,actionTitle:q5,actionResult:G5,actionResultSuccess:X5,actionResultFailure:Q5,modelIOTitle:Y5},Z5=e=>{var r,i,o;const[t,n]=m.useState((r=e.testStep)==null?void 0:r[0]);return console.log("activeStep:",t),!e.testStep||Array.isArray(e.testStep)&&e.testStep.length===0?te(Ti,{}):Ot("div",{className:gt.container,children:[te("div",{className:gt.left,children:(i=e==null?void 0:e.testStep)==null?void 0:i.map(a=>Ot("div",{className:oe(gt.stepItem,t.id===a.id&&gt.activeItem),onClick:()=>{n(a)},children:[Ot("div",{className:gt.stepNumber,children:["ๆญฅ้ชค ",a.id,te("span",{className:oe(gt.badge,a.status===$r.passed?gt.badgeSuccess:a.status===$r.failed?gt.badgeFailure:a.status===$r.warning?gt.badgeWarning:gt.hide),children:a.status===$r.passed?"้€š่ฟ‡":a.status===$r.failed?"ไธ้€š่ฟ‡":"่ญฆๅ‘Š"})]}),te("div",{className:gt.stepDescription,children:te(vc,{message:a.description})})]}))}),te("div",{className:gt.screenshots,children:(o=t==null?void 0:t.screenshots)==null?void 0:o.map(a=>te("div",{className:gt.screenshotWrapper,children:te("img",{className:gt.screenshot,src:a.data})}))}),(Array.isArray(t.actions)&&t.actions.length>0||t.modelIO)&&Ot("div",{className:gt.right,children:[Array.isArray(t.actions)&&t.actions.length>0&&Ot("div",{className:gt.actionsList,children:[te("div",{className:gt.modelIOTitle,children:"ๆ‰ง่กŒๅŠจไฝœ"}),t.actions.map(a=>te("div",{className:oe(gt.actionCard,gt.success),"data-step":"0","data-case":"0",children:Ot("div",{className:gt.actionHeader,children:[te("div",{className:gt.actionTitle,children:a.index}),te("div",{className:gt.description,children:a.description}),te("div",{className:oe(gt.actionResult,gt.success),children:a.success?"โœ“ ๆˆๅŠŸ":"ร— ๅคฑ่ดฅ"})]})}))]}),t.modelIO&&Ot(Ti,{children:[te("div",{className:gt.modelIOTitle,children:"ๆจกๅž‹่พ“ๅ‡บ"}),te(vc,{message:t.modelIO})]})]})]})},J5=e=>{var t,n;return Ot("div",{className:cr.subContainer,children:[te(XC,{reports:e.report||[],isOnlyReport:Array.isArray(e.steps)&&((t=e.steps)==null?void 0:t.length)==0}),e.steps&&Array.isArray(e.steps)&&((n=e.steps)==null?void 
0:n.length)>0&&te(Z5,{testStep:e.steps})]})},e6=e=>{const[t,n]=m.useState(""),r=m.useMemo(()=>{var i,o;return Array.isArray(e==null?void 0:e.subTest)&&(e==null?void 0:e.subTest.length)>0?(n((o=(i=e==null?void 0:e.subTest)==null?void 0:i[0])==null?void 0:o.name),e==null?void 0:e.subTest.map(a=>({label:Ot("div",{children:[" ",a.name," ",te("span",{className:oe(cr.badge,a.status===$r.passed?cr.badgeSuccess:a.status===$r.failed?cr.badgeFailure:a.status===$r.warning?cr.badgeWarning:cr.hide),children:a.status===$r.passed?"้€š่ฟ‡":a.status===$r.failed?"ไธ้€š่ฟ‡":"่ญฆๅ‘Š"})]}),key:a.name,title:a.name,children:te(J5,{...a})}))):[]},[e]);return!r||r.length===0?Ot("div",{className:cr.noResult,children:[te("span",{children:"ๆš‚ๆ— ๆต‹่ฏ•็ป“ๆžœ"}),te(Xp,{className:cr.noIcon})]}):te("div",{className:cr.sContainer,children:te(qp,{size:"small",type:"card",defaultActiveKey:t,items:r})})},t6=e=>{const[t,n]=m.useState(""),r=m.useMemo(()=>{var i,o;return Array.isArray(e.items)&&((i=e.items)==null?void 0:i.length)>0?(n((o=e.items)==null?void 0:o[0].test_id),e.items.map(a=>({label:a.test_name,key:a.test_id,title:a.test_name,children:te(e6,{subTest:a.sub_tests})}))):[]},[e.items]);return!r||r.length===0?Ot("div",{className:cr.noResult,children:[te("span",{children:"ๆš‚ๆ— ๆต‹่ฏ•็ป“ๆžœ"}),te(Xp,{className:cr.noIcon})]}):te("div",{className:cr.container,children:te(qp,{size:"small",type:"card",defaultActiveKey:t,items:r})})},n6="_container_1tejq_1",r6="_name_1tejq_5",i6={container:n6,name:r6},d1=({title:e,children:t})=>te("div",{className:i6.container,children:t});function o6(){const[e,t]=m.useState([]),[n,r]=m.useState("aggregated_results");m.useEffect(()=>{var a,l,s,u,c;let o=[{name:(l=(a=window==null?void 0:window.testResultData)==null?void 0:a.aggregated_results)==null?void 0:l.title,key:"aggregated_results"}];for(let f in(s=window==null?void 0:window.testResultData)==null?void 0:s.test_results)o.push({name:(c=(u=window==null?void 0:window.testResultData)==null?void 0:u.test_results)==null?void 0:c[f].title,key:f});r("aggregated_results"),t(o)},[]);const i=m.useMemo(()=>{var o,a,l,s,u,c;return n==="aggregated_results"?te(d1,{title:(o=window==null?void 0:window.testResultData)==null?void 0:o.aggregated_results.title,children:te(g5,{...(a=window==null?void 0:window.testResultData)==null?void 0:a.aggregated_results})}):te(d1,{title:(s=(l=window==null?void 0:window.testResultData)==null?void 0:l.test_results)==null?void 0:s[n].title,children:te(t6,{...(c=(u=window==null?void 0:window.testResultData)==null?void 0:u.test_results)==null?void 0:c[n]})})},[n]);return window!=null&&window.testResultData?(console.log("menuList:",e,n),Ot("div",{className:Ws.container,children:[te("div",{className:Ws.left,children:te(jN,{children:Ot(Up,{children:[te("div",{className:Ws.webTitle,children:"WebQA"}),te(i0,{mode:"inline",defaultSelectedKeys:["aggregated_results"],children:e&&(e==null?void 0:e.length)>0&&e.map(o=>te(i0.Item,{onClick:()=>{r(o.key)},children:te("span",{className:"nav-text",children:o.name})},o.key))})]})})}),te("div",{className:Ws.right,children:i})]})):te(Ti,{})}Cd.createRoot(document.getElementById("root")).render(te(De.StrictMode,{children:te(h2,{theme:{token:{colorBgLayout:"##F9FAFC",colorPrimary:"#1677ff",colorLink:"#1677ff",controlItemBgActive:"#F4F5F9",controlItemBgActiveHover:"#F4F5F9",borderRadius:2,colorFillTertiary:"#F4F5F9",colorFillSecondary:"#F4F5F9",colorErrorBg:"#F4F5F9",colorErrorBgHover:"#F4F5F9"},components:{Select:{activeOutlineColor:"rgba(0, 0, 0, 
0)"},Layout:{siderBg:"##F9FAFC"}}},children:te(o6,{})})}));
webqa_agent/static/assets/index_en-US.js ADDED
The diff for this file is too large to render. See raw diff
 
webqa_agent/static/i18n/en-US.json ADDED
@@ -0,0 +1,127 @@
1
+ {
2
+ "aggregator": {
3
+ "assessment_overview": "Overview",
4
+ "summary_and_advice": "Summary & Advice",
5
+ "issue_list": "Issue List",
6
+ "issue_tracker_list": "Issue Tracker List",
7
+ "issue_list_note": "Note: This list summarizes all detected \"failed\" and \"warning\" items",
8
+ "assessment_categories": "Test Cases",
9
+ "passed_count": "Passed",
10
+ "failed_count": "Failed",
11
+ "test_failed_prefix": "Test Failed: ",
12
+ "execution_error_prefix": "Execution Error: ",
13
+ "llm_prompt_main": "You are an experienced software testing analyst. Please read the following sub-test information and extract [Issue Content], [Issue Count], and [Severity]:\n1) If status = pass, return JSON {\"issue_count\": 0}.\n2) If status != pass, judge based on the specific content of report, metrics, or final_summary:\n - Extract the most critical one-sentence issue description as issues\n - Count issue quantity as issue_count (if unable to count accurately, default to 1)\n - Severity assessment: First check if severity is already marked in the report (like high/medium/low, critical/major/minor, etc.), if so, follow directly; if not clearly marked in report, judge based on issue impact: high (serious impact on functionality/performance), medium (moderate impact), low (minor issues/warnings)\n3) You cannot output any other content or code blocks, only output unified JSON: {\"issue_count\": <number>, \"issues\": \"one-sentence English issue description\", \"severity\": \"high|medium|low\"}.",
14
+ "llm_prompt_test_info": "Sub-test information: "
15
+ },
16
+ "testers": {
17
+ "basic": {
18
+ "basic_test_display": "Basic Function Test - ",
19
+ "accessibility_check": "Accessibility Check",
20
+ "main_link_check": "Main Link Check",
21
+ "sub_link_check": "Sub Link Check",
22
+ "test_results": "Test Results",
23
+ "clickable_element_check": "Clickable Element Traversal Check",
24
+ "click_element": "Click Element",
25
+ "traversal_test_results": "Traversal Test Results",
26
+ "clickable_elements_count": "Clickable elements",
27
+ "click_failed_count": ", click actions failed"
28
+ },
29
+ "performance": {
30
+ "core_metrics": "Core Web Vitals",
31
+ "improve": "Improve",
32
+ "current_value": "Current value",
33
+ "target": "Target",
34
+ "performance_optimization": "Performance Optimization",
35
+ "potential_savings": "Potential savings",
36
+ "resource_optimization": "Resource Optimization",
37
+ "reduce_total_size": "Reduce total page size",
38
+ "current": "Current",
39
+ "optimize_third_party": "Optimize third-party resource usage",
40
+ "performance_diagnosis": "Performance Diagnosis",
41
+ "seo": "SEO",
42
+ "images": "images",
43
+ "links": "links",
44
+ "example": "e.g.",
45
+ "overall_score": "Overall Score",
46
+ "issues_to_improve": "Issues to Improve",
47
+ "performance_metrics": "Performance Metrics"
48
+ },
49
+ "ux": {
50
+ "ux_test_display": "UX Test - ",
51
+ "text_check_name": "Text Check",
52
+ "page_blank_error": "Page is blank, no visible content",
53
+ "no_issues_found": "No issues found",
54
+ "report_title": "Text Check",
55
+ "overall_problem": "**Overall Problem:** ",
56
+ "issue_details": "### {}. Issue Details\n\n",
57
+ "location": "**Location:** ",
58
+ "error_content": "**Current Error:** ",
59
+ "suggested_fix": "**Suggested Fix:** ",
60
+ "error_type": "**Error Type:** ",
61
+ "unknown_location": "Unknown Location",
62
+ "unknown_type": "Unknown Type",
63
+ "layout_check_name": "Web Content Check",
64
+ "element_check_name": "Web Element Check",
65
+ "report_title_content": "Content Check",
66
+ "layout_case": "Layout Check",
67
+ "image_case": "Element Check",
68
+ "text_case": "Text Typography"
69
+ },
70
+ "security": {
71
+ "cve_scan": "Known CVE Vulnerability Scan",
72
+ "xss_scan": "Cross-site Scripting Detection",
73
+ "sqli_scan": "SQL Injection Detection",
74
+ "rce_scan": "Remote Code Execution Detection",
75
+ "lfi_scan": "Local File Inclusion Detection",
76
+ "ssrf_scan": "Server-side Request Forgery Detection",
77
+ "redirect_scan": "Open Redirect Detection",
78
+ "exposure_scan": "Sensitive Information Exposure Detection",
79
+ "config_scan": "Configuration Error Detection",
80
+ "default_login_scan": "Default Credentials Detection",
81
+ "ssl_scan": "SSL/TLS Configuration Detection",
82
+ "dns_scan": "DNS Related Detection",
83
+ "subdomain_takeover_scan": "Subdomain Takeover Detection",
84
+ "tech_scan": "Technology Stack Identification",
85
+ "panel_scan": "Admin Panel Detection",
86
+ "http_protocol": "HTTP Protocol Scan",
87
+ "dns_protocol": "DNS Protocol Scan",
88
+ "tcp_protocol": "TCP Protocol Scan",
89
+ "ssl_protocol": "SSL Protocol Scan",
90
+ "critical_vulnerability": "Critical Vulnerability Scan",
91
+ "high_risk_vulnerability": "High Risk Vulnerability Scan",
92
+ "medium_risk_vulnerability": "Medium Risk Vulnerability Scan",
93
+ "no_security_issues": "No related security issues found",
94
+ "found_issues": "Found {count} issues",
95
+ "including": "including",
96
+ "and_more": "and more",
97
+ "security_check": "Security Check",
98
+ "no_issues_found": "No issues found",
99
+ "nuclei_check": "Nuclei Check",
100
+ "nuclei_not_found": "Nuclei tool not found. Please install nuclei: go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest",
101
+ "no_severity_issues": "No {severity} level security issues found",
102
+ "found_severity_issues": "Found {count} {severity} level security issues",
103
+ "severity_level_scan": "{severity} Level Security Issues Scan",
104
+ "severity_level_vulnerability": "{severity} Level Security Vulnerability Scan",
105
+ "matched_at": "Matched at",
106
+ "extracted": "Extracted",
107
+ "no_details": "No further details"
108
+ },
109
+ "ai_function": {
110
+ "intelligent_function_test": "Intelligent Function Test"
111
+ }
112
+ },
113
+ "common": {
114
+ "level": "level",
115
+ "issues": "issues"
116
+ },
117
+ "display": {
118
+ "completed_tasks": "๐ŸŽ‰ Completed Tasks",
119
+ "running_tasks": "๐Ÿš€ Running Tasks",
120
+ "total_time": "โฑ๏ธ Total Time",
121
+ "error_tasks": "โš ๏ธ Error Tasks:",
122
+ "error_message": "Error Message:",
123
+ "task_execution_summary": "๐Ÿ“Š Task Execution Summary",
124
+ "no_issues_found": "No issues found"
125
+ }
126
+ }
127
+
webqa_agent/static/i18n/zh-CN.json ADDED
@@ -0,0 +1,127 @@
1
+ {
2
+ "aggregator": {
3
+ "assessment_overview": "่ฏ„ไผฐๆ€ป่งˆ",
4
+ "summary_and_advice": "ๆ‘˜่ฆไธŽๅปบ่ฎฎ",
5
+ "issue_list": "้—ฎ้ข˜ๅˆ—่กจ",
6
+ "issue_tracker_list": "้—ฎ้ข˜่ฟฝ่ธชๅˆ—่กจ",
7
+ "issue_list_note": "ๆณจ๏ผšๆญคๅˆ—่กจๆฑ‡ๆ€ปไบ†ๆ‰€ๆœ‰ๆฃ€ๆต‹ๅˆฐ็š„\"ๅคฑ่ดฅ\"ๅ’Œ\"่ญฆๅ‘Š\"้กน",
8
+ "assessment_categories": "่ฏ„ไผฐ็ฑปๅˆซ",
9
+ "passed_count": "้€š่ฟ‡ๆ•ฐ",
10
+ "failed_count": "ๅคฑ่ดฅๆ•ฐ",
11
+ "test_failed_prefix": "ๆต‹่ฏ•ไธ้€š่ฟ‡: ",
12
+ "execution_error_prefix": "ๆ‰ง่กŒๅผ‚ๅธธ: ",
13
+ "llm_prompt_main": "ไฝ ๆ˜ฏไธ€ๅ็ป้ชŒไธฐๅฏŒ็š„่ฝฏไปถๆต‹่ฏ•ๅˆ†ๆžๅธˆใ€‚่ฏท้˜…่ฏปไปฅไธ‹ๅญๆต‹่ฏ•ไฟกๆฏ๏ผŒๆๅ–ใ€้—ฎ้ข˜ๅ†…ๅฎนใ€‘ใ€ใ€้—ฎ้ข˜ๆ•ฐ้‡ใ€‘ๅ’Œใ€ไธฅ้‡็จ‹ๅบฆใ€‘๏ผš\n1๏ผ‰ๅฆ‚ๆžœ status = pass๏ผŒ่ฏท่ฟ”ๅ›ž JSON {\"issue_count\": 0}ใ€‚\n2๏ผ‰ๅฆ‚ๆžœ status != pass๏ผŒๅˆ™ๆ นๆฎ reportใ€metrics ๆˆ– final_summary ็š„ๅ…ทไฝ“ๅ†…ๅฎนๅˆคๆ–ญ๏ผš\n - ๆๅ–ๆœ€ๅ…ณ้”ฎ็š„ไธ€ๅฅ่ฏ้—ฎ้ข˜ๆ่ฟฐ issues\n - ็ปŸ่ฎก้—ฎ้ข˜ๆ•ฐ้‡ issue_count๏ผˆๅฆ‚ๆžœๆ— ๆณ•ๅ‡†็กฎ็ปŸ่ฎก๏ผŒๅฏ้ป˜่ฎคไธบ 1๏ผ‰\n - ไธฅ้‡็จ‹ๅบฆๅˆคๆ–ญ๏ผšไผ˜ๅ…ˆๆŸฅ็œ‹ report ไธญๆ˜ฏๅฆๅทฒๆ ‡ๆ˜Žไธฅ้‡็จ‹ๅบฆ๏ผˆๅฆ‚ high/medium/lowใ€ไธฅ้‡/ไธญ็ญ‰/่ฝปๅพฎใ€critical/major/minor ็ญ‰๏ผ‰๏ผŒๅฆ‚ๆžœๆœ‰ๅˆ™็›ดๆŽฅ้ตๅพช๏ผ›ๅฆ‚ๆžœ report ไธญๆฒกๆœ‰ๆ˜Ž็กฎๆ ‡ๆ˜Ž๏ผŒๅˆ™ๆ นๆฎ้—ฎ้ข˜ๅฝฑๅ“็จ‹ๅบฆ่‡ช่กŒๅˆคๆ–ญ๏ผšhigh๏ผˆไธฅ้‡ๅฝฑๅ“ๅŠŸ่ƒฝ/ๆ€ง่ƒฝ๏ผ‰ใ€medium๏ผˆไธญ็ญ‰ๅฝฑๅ“๏ผ‰ใ€low๏ผˆ่ฝปๅพฎ้—ฎ้ข˜/่ญฆๅ‘Š๏ผ‰\n3๏ผ‰ไฝ ไธ่ƒฝ่พ“ๅ‡บไปปไฝ•ๅ…ถไป–ๅ†…ๅฎน๏ผŒไนŸไธ่ƒฝ่พ“ๅ‡บไปฃ็ ๅ—๏ผŒๅช่ƒฝ่พ“ๅ‡บ็ปŸไธ€ไธบ JSON๏ผš{\"issue_count\": <ๆ•ฐๅญ—>, \"issues\": \"ไธ€ๅฅ่ฏไธญๆ–‡้—ฎ้ข˜ๆ่ฟฐ\", \"severity\": \"high|medium|low\"}ใ€‚",
14
+ "llm_prompt_test_info": "ๅญๆต‹่ฏ•ไฟกๆฏ: "
15
+ },
16
+ "testers": {
17
+ "basic": {
18
+ "basic_test_display": "้ๅކๅŠŸ่ƒฝๆต‹่ฏ• - ",
19
+ "accessibility_check": "ๅฏ่ฎฟ้—ฎๆ€งๆฃ€ๆŸฅ",
20
+ "main_link_check": "ไธป้“พๆŽฅๆฃ€ๆŸฅ",
21
+ "sub_link_check": "ๅญ้“พๆŽฅๆฃ€ๆŸฅ",
22
+ "test_results": "ๆต‹่ฏ•็ป“ๆžœ",
23
+ "clickable_element_check": "ๅฏ็‚นๅ‡ปๅ…ƒ็ด ้ๅކๆฃ€ๆŸฅ",
24
+ "click_element": "็‚นๅ‡ปๅ…ƒ็ด ",
25
+ "traversal_test_results": "้ๅކๆต‹่ฏ•็ป“ๆžœ",
26
+ "clickable_elements_count": "ๅฏ็‚นๅ‡ปๅ…ƒ็ด ",
27
+ "click_failed_count": "ไธช๏ผŒ็‚นๅ‡ป่กŒไธบๅคฑ่ดฅ"
28
+ },
29
+ "performance": {
30
+ "core_metrics": "ๆ ธๅฟƒๆŒ‡ๆ ‡",
31
+ "improve": "ๆ”น่ฟ›",
32
+ "current_value": "ๅฝ“ๅ‰ๅ€ผ",
33
+ "target": "็›ฎๆ ‡",
34
+ "performance_optimization": "ๆ€ง่ƒฝไผ˜ๅŒ–",
35
+ "potential_savings": "ๆฝœๅœจ่Š‚็œ",
36
+ "resource_optimization": "่ต„ๆบไผ˜ๅŒ–",
37
+ "reduce_total_size": "ๅ‡ๅฐ‘้กต้ขๆ€ปๅคงๅฐ",
38
+ "current": "ๅฝ“ๅ‰",
39
+ "optimize_third_party": "ไผ˜ๅŒ–็ฌฌไธ‰ๆ–น่ต„ๆบไฝฟ็”จ",
40
+ "performance_diagnosis": "ๆ€ง่ƒฝ่ฏŠๆ–ญ",
41
+ "seo": "SEO",
42
+ "images": "ไธชๅ›พ็‰‡",
43
+ "links": "ไธช้“พๆŽฅ",
44
+ "example": "ไพ‹ๅฆ‚",
45
+ "overall_score": "ๆ•ดไฝ“่ฏ„ๅˆ†",
46
+ "issues_to_improve": "ๅพ…ๆ”น่ฟ›้—ฎ้ข˜",
47
+ "performance_metrics": "ๆ€ง่ƒฝๆŒ‡ๆ ‡"
48
+ },
49
+ "ux": {
50
+ "ux_test_display": "็”จๆˆทไฝ“้ชŒๆต‹่ฏ• - ",
51
+ "text_check_name": "ๆ–‡ๆœฌๆฃ€ๆŸฅ",
52
+ "page_blank_error": "้กต้ข็™ฝๅฑ๏ผŒๆฒกๆœ‰ไปปไฝ•ๅฏ่งๅ†…ๅฎน",
53
+ "no_issues_found": "ๆ— ๅ‘็Žฐ้—ฎ้ข˜",
54
+ "report_title": "ๆ–‡ๆœฌๆฃ€ๆŸฅ",
55
+ "overall_problem": "**ๆ€ปไฝ“้—ฎ้ข˜๏ผš** ",
56
+ "issue_details": "### {}. ้—ฎ้ข˜่ฏฆๆƒ…\n\n",
57
+ "location": "**ไฝ็ฝฎ๏ผš** ",
58
+ "error_content": "**้”™่ฏฏๅ†…ๅฎน๏ผš** ",
59
+ "suggested_fix": "**ๅปบ่ฎฎไฟฎๆ”น๏ผš** ",
60
+ "error_type": "**้”™่ฏฏ็ฑปๅž‹๏ผš** ",
61
+ "unknown_location": "ๆœช็Ÿฅไฝ็ฝฎ",
62
+ "unknown_type": "ๆœช็Ÿฅ็ฑปๅž‹",
63
+ "layout_check_name": "็ฝ‘้กตๅ†…ๅฎนๆฃ€ๆŸฅ",
64
+ "element_check_name": "็ฝ‘้กตๅ…ƒ็ด ๆฃ€ๆŸฅ",
65
+ "report_title_content": "ๅ†…ๅฎนๆฃ€ๆŸฅ",
66
+ "layout_case": "ๅธƒๅฑ€ๆฃ€ๆŸฅ",
67
+ "image_case": "ๅ…ƒ็ด ๆฃ€ๆŸฅ",
68
+ "text_case": "ๆ–‡ๅญ—ๆŽ’็‰ˆ"
69
+ },
70
+ "security": {
71
+ "cve_scan": "ๅทฒ็ŸฅCVEๆผๆดžๆ‰ซๆ",
72
+ "xss_scan": "่ทจ็ซ™่„šๆœฌๆ”ปๅ‡ปๆฃ€ๆต‹",
73
+ "sqli_scan": "SQLๆณจๅ…ฅๆฃ€ๆต‹",
74
+ "rce_scan": "่ฟœ็จ‹ไปฃ็ ๆ‰ง่กŒๆฃ€ๆต‹",
75
+ "lfi_scan": "ๆœฌๅœฐๆ–‡ไปถๅŒ…ๅซๆฃ€ๆต‹",
76
+ "ssrf_scan": "ๆœๅŠก็ซฏ่ฏทๆฑ‚ไผช้€ ๆฃ€ๆต‹",
77
+ "redirect_scan": "ๅผ€ๆ”พ้‡ๅฎšๅ‘ๆฃ€ๆต‹",
78
+ "exposure_scan": "ๆ•ๆ„Ÿไฟกๆฏๆณ„้œฒๆฃ€ๆต‹",
79
+ "config_scan": "้…็ฝฎ้”™่ฏฏๆฃ€ๆต‹",
80
+ "default_login_scan": "้ป˜่ฎคๅ‡ญๆฎๆฃ€ๆต‹",
81
+ "ssl_scan": "SSL/TLS้…็ฝฎๆฃ€ๆต‹",
82
+ "dns_scan": "DNS็›ธๅ…ณๆฃ€ๆต‹",
83
+ "subdomain_takeover_scan": "ๅญๅŸŸๅๆŽฅ็ฎกๆฃ€ๆต‹",
84
+ "tech_scan": "ๆŠ€ๆœฏๆ ˆ่ฏ†ๅˆซ",
85
+ "panel_scan": "็ฎก็†้ขๆฟๆฃ€ๆต‹",
86
+ "http_protocol": "HTTPๅ่ฎฎๆ‰ซๆ",
87
+ "dns_protocol": "DNSๅ่ฎฎๆ‰ซๆ",
88
+ "tcp_protocol": "TCPๅ่ฎฎๆ‰ซๆ",
89
+ "ssl_protocol": "SSLๅ่ฎฎๆ‰ซๆ",
90
+ "critical_vulnerability": "ไธฅ้‡ๆผๆดžๆ‰ซๆ",
91
+ "high_risk_vulnerability": "้ซ˜ๅฑๆผๆดžๆ‰ซๆ",
92
+ "medium_risk_vulnerability": "ไธญๅฑๆผๆดžๆ‰ซๆ",
93
+ "no_security_issues": "ๆœชๅ‘็Žฐ็›ธๅ…ณๅฎ‰ๅ…จ้—ฎ้ข˜",
94
+ "found_issues": "ๅ‘็Žฐ{count}ไธช้—ฎ้ข˜",
95
+ "including": "ๅŒ…ๆ‹ฌ",
96
+ "and_more": "็ญ‰",
97
+ "security_check": "ๅฎ‰ๅ…จๆฃ€ๆŸฅ",
98
+ "no_issues_found": "ๆ— ๅ‘็Žฐ้—ฎ้ข˜",
99
+ "nuclei_check": "nucleiๆฃ€ๆŸฅ",
100
+ "nuclei_not_found": "Nucleiๅทฅๅ…ทๆœชๆ‰พๅˆฐใ€‚่ฏทๅฎ‰่ฃ…nuclei: go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest",
101
+ "no_severity_issues": "ๆœชๅ‘็Žฐ{severity}็บงๅˆซๅฎ‰ๅ…จ้—ฎ้ข˜",
102
+ "found_severity_issues": "ๅ‘็Žฐ{count}ไธช{severity}็บงๅˆซๅฎ‰ๅ…จ้—ฎ้ข˜",
103
+ "severity_level_scan": "{severity}็บงๅˆซๅฎ‰ๅ…จ้—ฎ้ข˜ๆ‰ซๆ",
104
+ "severity_level_vulnerability": "{severity}็บงๅˆซๅฎ‰ๅ…จๆผๆดžๆ‰ซๆ",
105
+ "matched_at": "ๅŒน้…ไฝ็ฝฎ",
106
+ "extracted": "ๆๅ–ๅ†…ๅฎน",
107
+ "no_details": "ๆ— ๆ›ดๅคš่ฏฆๆƒ…"
108
+ },
109
+ "ai_function": {
110
+ "intelligent_function_test": "ๆ™บ่ƒฝๅŠŸ่ƒฝๆต‹่ฏ•"
111
+ }
112
+ },
113
+ "common": {
114
+ "level": "็บงๅˆซ",
115
+ "issues": "้—ฎ้ข˜"
116
+ },
117
+ "display": {
118
+ "completed_tasks": "๐ŸŽ‰ ๅทฒๅฎŒๆˆไปปๅŠก",
119
+ "running_tasks": "๐Ÿš€ ๆญฃๅœจๆ‰ง่กŒไปปๅŠก",
120
+ "total_time": "โฑ๏ธ ๆ€ปๅ…ฑ่€—ๆ—ถ",
121
+ "error_tasks": "โš ๏ธ ้”™่ฏฏไปปๅŠกๅˆ—่กจ๏ผš",
122
+ "error_message": "้”™่ฏฏไฟกๆฏ๏ผš",
123
+ "task_execution_summary": "๐Ÿ“Š ไปปๅŠกๆ‰ง่กŒ็ปŸ่ฎก้ขๆฟ",
124
+ "no_issues_found": "ๆ— ๅ‘็Žฐ้—ฎ้ข˜"
125
+ }
126
+ }
127
+
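
The two locale files above share an identical key tree (`aggregator`, `testers.basic/performance/ux/security/ai_function`, `common`, `display`); the Python changes further down resolve strings through `webqa_agent.utils.i18n.get_lang_data`, whose implementation is not part of this diff. As a rough sketch only, assuming the JSON files are read from `webqa_agent/static/i18n/` and cached per language, such a helper could look like this:

```python
# Hypothetical sketch of webqa_agent/utils/i18n.py -- the real module is not shown in this commit.
import json
from functools import lru_cache
from pathlib import Path

# Assumes the locale files added above live next to the package's static assets.
_I18N_DIR = Path(__file__).resolve().parent.parent / "static" / "i18n"


@lru_cache(maxsize=None)
def get_lang_data(language: str = "zh-CN") -> dict:
    """Load and cache the locale dictionary for a language tag (zh-CN or en-US)."""
    lang_file = _I18N_DIR / f"{language}.json"
    if not lang_file.exists():
        lang_file = _I18N_DIR / "zh-CN.json"  # fall back to the project default
    with open(lang_file, encoding="utf-8") as f:
        return json.load(f)
```

Under that assumption, `get_lang_data('en-US')['testers']['basic']['accessibility_check']` would return "Accessibility Check", matching the lookups performed by the testers below.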
webqa_agent/testers/basic_tester.py CHANGED
@@ -9,93 +9,111 @@ import requests
9
  from playwright.async_api import Page
10
 
11
  from webqa_agent.data.test_structures import SubTestReport, SubTestResult, TestStatus, SubTestScreenshot, SubTestStep
 
 
12
  from webqa_agent.utils.log_icon import icon
13
 
14
-
15
  class WebAccessibilityTest:
 
 
  async def run(self, url: str, sub_links: list) -> SubTestResult:
17
  logging.debug(f"Starting combined HTTPS and status check for {url}")
18
- result = SubTestResult(name="ๅฏ่ฎฟ้—ฎๆ€งๆฃ€ๆŸฅ")
19
 
20
- try:
21
- # check the main link
22
- main_valid, main_reason, main_expiry_date = await self.check_https_expiry(url)
23
- main_status = await self.check_page_status(url)
24
- main_url_result = {
25
- "url": url,
26
- "status": main_status,
27
- "https_valid": main_valid,
28
- "https_reason": main_reason,
29
- "https_expiry_date": main_expiry_date,
30
- }
31
-
32
- # check sub links
33
- sub_link_results = []
34
- failed_links = 0
35
- total_links = 1 # include main link
36
-
37
- if sub_links:
38
- total_links += len(sub_links)
39
- for link in sub_links:
40
- sub_result = {
41
- "url": link,
42
- "status": None,
43
- "https_valid": None,
44
- "https_reason": None,
45
- "https_expiry_date": None,
46
- }
47
- try:
48
- sub_result["https_valid"], sub_result["https_reason"], sub_result["https_expiry_date"] = (
49
- await self.check_https_expiry(link)
 
50
  )
51
- except Exception as e:
52
- logging.error(f"Failed to check HTTPS for {link}: {str(e)}")
53
- sub_result["https"] = {"error": str(e)}
54
- try:
55
- sub_result["status"] = await self.check_page_status(link)
56
- except Exception as e:
57
- logging.error(f"Failed to check status for {link}: {str(e)}")
58
- sub_result["status"] = {"error": str(e)}
59
- sub_link_results.append(sub_result)
60
-
61
- # check if all passed
62
- def is_passed(item):
63
- https_valid = item["https_valid"]
64
- status_code = item["status"]
65
- # ensure status_code is an integer
66
- if isinstance(status_code, dict):
67
- return False # if status_code is a dict (contains error info), then test failed
68
- return https_valid and (status_code is not None and status_code < 400)
69
-
70
- all_passed = is_passed(main_url_result)
71
- if not all_passed:
72
- failed_links += 1
73
-
74
- if sub_links:
75
- for link in sub_link_results:
76
- if not is_passed(link):
77
- failed_links += 1
78
- all_passed = all_passed and all(is_passed(link) for link in sub_link_results)
79
-
80
- result.status = TestStatus.PASSED if all_passed else TestStatus.FAILED
81
-
82
- # add main link check steps
83
- result.report.append(SubTestReport(title="ไธป้“พๆŽฅๆฃ€ๆŸฅ", issues=f"ๆต‹่ฏ•็ป“ๆžœ: {main_url_result}"))
84
-
85
- # add sub link check steps
86
- if sub_links:
87
- for i, sub_link_result in enumerate(sub_link_results):
88
- result.report.append(
89
- SubTestReport(title=f"ๅญ้“พๆŽฅๆฃ€ๆŸฅ {i + 1}", issues=f"ๆต‹่ฏ•็ป“ๆžœ: {sub_link_result}")
90
- )
91
 
92
- except Exception as e:
93
- error_message = f"An error occurred in WebAccessibilityTest: {str(e)}"
94
- logging.error(error_message)
95
- result.status = TestStatus.FAILED
96
- result.messages = {"error": error_message}
97
 
98
- return result
99
 
100
  @staticmethod
101
  async def check_https_expiry(url: str, timeout: float = 10.0) -> tuple[bool, str, str]:
@@ -155,8 +173,18 @@ class WebAccessibilityTest:
155
 
156
  class PageButtonTest:
157
 
158
- @staticmethod
159
- async def run(url: str, page: Page, clickable_elements: dict, **kwargs) -> SubTestResult:
 
  """Run page button test.
161
 
162
  Args:
@@ -168,85 +196,88 @@ class PageButtonTest:
168
  SubTestResult containing test results and click screenshots
169
  """
170
 
171
- result = SubTestResult(name="ๅฏ็‚นๅ‡ปๅ…ƒ็ด ้ๅކๆฃ€ๆŸฅ")
172
  logging.info(f"{icon['running']} Running Sub Test: {result.name}")
173
  sub_test_results = []
174
- try:
175
- status = TestStatus.PASSED
176
- from webqa_agent.actions.click_handler import ClickHandler
177
-
178
- click_handler = ClickHandler()
179
- await click_handler.setup_listeners(page)
180
-
181
- # count total passed / failed
182
- total, total_failed = 0, 0
183
-
184
- if clickable_elements:
185
- for highlight_id, element in clickable_elements.items():
186
- # Run single test with the provided browser configuration
187
- element_text = element.get("selector", "Unknown")
188
- logging.info(f"Testing clickable element {highlight_id}...")
189
-
190
- try:
191
- current_url = page.url
192
- if current_url != url:
193
- await page.goto(url)
194
- await asyncio.sleep(0.5) # Wait for page to stabilize
195
-
196
- screenshots = []
197
- click_result = await click_handler.click_and_screenshot(page, element, highlight_id)
198
- if click_result.get("screenshot_after"):
199
- scr = click_result["screenshot_after"]
200
- if isinstance(scr, str):
201
- screenshots.append(SubTestScreenshot(type="base64", data=scr))
202
- elif isinstance(scr, dict):
203
- screenshots.append(SubTestScreenshot(**scr))
204
- if click_result.get("new_page_screenshot"):
205
- scr = click_result["new_page_screenshot"]
206
- if isinstance(scr, str):
207
- screenshots.append(SubTestScreenshot(type="base64", data=scr))
208
- elif isinstance(scr, dict):
209
- screenshots.append(SubTestScreenshot(**scr))
210
-
211
- business_success = click_result["success"]
212
- step = SubTestStep(
213
- id=int(highlight_id), description=f"็‚นๅ‡ปๅ…ƒ็ด : {element_text}", screenshots=screenshots
214
- )
215
- # Determine step status based on business result
216
- step_status = TestStatus.PASSED if business_success else TestStatus.FAILED
217
- step.status = step_status # record status for each step
218
- total += 1
219
- if step_status != TestStatus.PASSED:
 
 
  total_failed += 1
221
  status = TestStatus.FAILED
222
-
223
- # Brief pause between clicks
224
- await asyncio.sleep(0.5)
225
-
226
- except Exception as e:
227
- error_message = f"PageButtonTest error: {str(e)}"
228
- logging.error(error_message)
229
- step.status = TestStatus.FAILED
230
- step.errors = str(e)
231
- total_failed += 1
232
- status = TestStatus.FAILED
233
- finally:
234
- sub_test_results.append(step)
235
-
236
- logging.info(f"{icon['check']} Sub Test Completed: {result.name}")
237
- result.report.append(
238
- SubTestReport(
239
- title="้ๅކๆต‹่ฏ•็ป“ๆžœ",
240
- issues=f"ๅฏ็‚นๅ‡ปๅ…ƒ็ด {total}ไธช๏ผŒ็‚นๅ‡ป่กŒไธบๅคฑ่ดฅ{total_failed}ไธช",
241
  )
242
- )
243
 
244
- except Exception as e:
245
- error_message = f"PageButtonTest error: {str(e)}"
246
- logging.error(error_message)
247
- status = TestStatus.FAILED
248
- raise
249
 
250
- result.status = status
251
- result.steps = sub_test_results
252
- return result
 
 
  from playwright.async_api import Page
10
 
11
  from webqa_agent.data.test_structures import SubTestReport, SubTestResult, TestStatus, SubTestScreenshot, SubTestStep
12
+ from webqa_agent.utils import Display
13
+ from webqa_agent.utils import i18n
14
  from webqa_agent.utils.log_icon import icon
15
 
 
16
  class WebAccessibilityTest:
17
+ def __init__(self, report_config: dict = None):
18
+ self.language = report_config.get("language", "zh-CN") if report_config else "zh-CN"
19
+ self.localized_strings = {
20
+ 'zh-CN': i18n.get_lang_data('zh-CN').get('testers', {}).get('basic', {}),
21
+ 'en-US': i18n.get_lang_data('en-US').get('testers', {}).get('basic', {}),
22
+ }
23
+
24
+ def _get_text(self, key: str) -> str:
25
+ """Get localized text for the given key."""
26
+ return self.localized_strings.get(self.language, {}).get(key, key)
27
+
28
  async def run(self, url: str, sub_links: list) -> SubTestResult:
29
  logging.debug(f"Starting combined HTTPS and status check for {url}")
30
+ result = SubTestResult(name=self._get_text('accessibility_check'))
31
 
32
+ with Display.display(self._get_text('basic_test_display') + result.name):
33
+ try:
34
+ # check the main link
35
+ main_valid, main_reason, main_expiry_date = await self.check_https_expiry(url)
36
+ main_status = await self.check_page_status(url)
37
+ main_url_result = {
38
+ "url": url,
39
+ "status": main_status,
40
+ "https_valid": main_valid,
41
+ "https_reason": main_reason,
42
+ "https_expiry_date": main_expiry_date,
43
+ }
44
+
45
+ # check sub links
46
+ sub_link_results = []
47
+ failed_links = 0
48
+ total_links = 1 # include main link
49
+
50
+ if sub_links:
51
+ total_links += len(sub_links)
52
+ for link in sub_links:
53
+ sub_result = {
54
+ "url": link,
55
+ "status": None,
56
+ "https_valid": None,
57
+ "https_reason": None,
58
+ "https_expiry_date": None,
59
+ }
60
+ try:
61
+ sub_result["https_valid"], sub_result["https_reason"], sub_result["https_expiry_date"] = (
62
+ await self.check_https_expiry(link)
63
+ )
64
+ except Exception as e:
65
+ logging.error(f"Failed to check HTTPS for {link}: {str(e)}")
66
+ sub_result["https"] = {"error": str(e)}
67
+ try:
68
+ sub_result["status"] = await self.check_page_status(link)
69
+ except Exception as e:
70
+ logging.error(f"Failed to check status for {link}: {str(e)}")
71
+ sub_result["status"] = {"error": str(e)}
72
+ sub_link_results.append(sub_result)
73
+
74
+ # check if all passed
75
+ def is_passed(item):
76
+ https_valid = item["https_valid"]
77
+ status_code = item["status"]
78
+ # ensure status_code is an integer
79
+ if isinstance(status_code, dict):
80
+ return False # if status_code is a dict (contains error info), then test failed
81
+ return https_valid and (status_code is not None and status_code < 400)
82
+
83
+ all_passed = is_passed(main_url_result)
84
+ if not all_passed:
85
+ failed_links += 1
86
+
87
+ if sub_links:
88
+ for link in sub_link_results:
89
+ if not is_passed(link):
90
+ failed_links += 1
91
+ all_passed = all_passed and all(is_passed(link) for link in sub_link_results)
92
+
93
+ result.status = TestStatus.PASSED if all_passed else TestStatus.FAILED
94
+
95
+ # add main link check steps
96
+ result.report.append(SubTestReport(
97
+ title=self._get_text('main_link_check'),
98
+ issues=f"{self._get_text('test_results')}: {main_url_result}"))
99
+
100
+ # add sub link check steps
101
+ if sub_links:
102
+ for i, sub_link_result in enumerate(sub_link_results):
103
+ result.report.append(
104
+ SubTestReport(
105
+ title=f"{self._get_text('sub_link_check')} {i + 1}",
106
+ issues=f"{self._get_text('test_results')}: {sub_link_result}")
107
  )
108
+ logging.info(f"{icon['check']} Sub Test Completed: {result.name}")
 
109
 
110
+ except Exception as e:
111
+ error_message = f"An error occurred in WebAccessibilityTest: {str(e)}"
112
+ logging.error(error_message)
113
+ result.status = TestStatus.FAILED
114
+ result.messages = {"error": error_message}
115
 
116
+ return result
117
 
118
  @staticmethod
119
  async def check_https_expiry(url: str, timeout: float = 10.0) -> tuple[bool, str, str]:
 
173
 
174
  class PageButtonTest:
175
 
176
+ def __init__(self, report_config: dict = None):
177
+ self.language = report_config.get("language", "zh-CN") if report_config else "zh-CN"
178
+ self.localized_strings = {
179
+ 'zh-CN': i18n.get_lang_data('zh-CN').get('testers', {}).get('basic', {}),
180
+ 'en-US': i18n.get_lang_data('en-US').get('testers', {}).get('basic', {}),
181
+ }
182
+
183
+ def _get_text(self, key: str) -> str:
184
+ """Get localized text for the given key."""
185
+ return self.localized_strings.get(self.language, {}).get(key, key)
186
+
187
+ async def run(self, url: str, page: Page, clickable_elements: dict, **kwargs) -> SubTestResult:
188
  """Run page button test.
189
 
190
  Args:
 
196
  SubTestResult containing test results and click screenshots
197
  """
198
 
199
+ result = SubTestResult(name=self._get_text('clickable_element_check'))
200
  logging.info(f"{icon['running']} Running Sub Test: {result.name}")
201
  sub_test_results = []
202
+ with Display.display(self._get_text('basic_test_display') + result.name):
203
+ try:
204
+ status = TestStatus.PASSED
205
+ from webqa_agent.actions.click_handler import ClickHandler
206
+
207
+ click_handler = ClickHandler()
208
+ await click_handler.setup_listeners(page)
209
+
210
+ # count total passed / failed
211
+ total, total_failed = 0, 0
212
+
213
+ if clickable_elements:
214
+ for highlight_id, element in clickable_elements.items():
215
+ # Run single test with the provided browser configuration
216
+ element_text = element.get("selector", "Unknown")
217
+ logging.info(f"Testing clickable element {highlight_id}...")
218
+
219
+ try:
220
+ current_url = page.url
221
+ if current_url != url:
222
+ await page.goto(url)
223
+ await asyncio.sleep(0.5) # Wait for page to stabilize
224
+
225
+ screenshots = []
226
+ click_result = await click_handler.click_and_screenshot(page, element, highlight_id)
227
+ if click_result.get("screenshot_after"):
228
+ scr = click_result["screenshot_after"]
229
+ if isinstance(scr, str):
230
+ screenshots.append(SubTestScreenshot(type="base64", data=scr))
231
+ elif isinstance(scr, dict):
232
+ screenshots.append(SubTestScreenshot(**scr))
233
+ if click_result.get("new_page_screenshot"):
234
+ scr = click_result["new_page_screenshot"]
235
+ if isinstance(scr, str):
236
+ screenshots.append(SubTestScreenshot(type="base64", data=scr))
237
+ elif isinstance(scr, dict):
238
+ screenshots.append(SubTestScreenshot(**scr))
239
+
240
+ business_success = click_result["success"]
241
+ step = SubTestStep(
242
+ id=int(highlight_id), description=f"{self._get_text('click_element')}: {element_text}", screenshots=screenshots
243
+ )
244
+ # Determine step status based on business result
245
+ step_status = TestStatus.PASSED if business_success else TestStatus.FAILED
246
+ step.status = step_status # record status for each step
247
+ total += 1
248
+ if step_status != TestStatus.PASSED:
249
+ total_failed += 1
250
+ status = TestStatus.FAILED
251
+
252
+ # Brief pause between clicks
253
+ await asyncio.sleep(0.5)
254
+
255
+ except Exception as e:
256
+ error_message = f"PageButtonTest error: {str(e)}"
257
+ logging.error(error_message)
258
+ step.status = TestStatus.FAILED
259
+ step.errors = str(e)
260
  total_failed += 1
261
  status = TestStatus.FAILED
262
+ finally:
263
+ sub_test_results.append(step)
264
+
265
+ logging.info(f"{icon['check']} Sub Test Completed: {result.name}")
266
+ result.report.append(
267
+ SubTestReport(
268
+ title=self._get_text('traversal_test_results'),
269
+ issues=f"{self._get_text('clickable_elements_count')}{total}{self._get_text('click_failed_count')}{total_failed}",
270
+ )
 
  )
 
272
 
273
+ logging.info(f"{icon['check']} Sub Test Completed: {result.name}")
 
 
 
 
274
 
275
+ except Exception as e:
276
+ error_message = f"PageButtonTest error: {str(e)}"
277
+ logging.error(error_message)
278
+ status = TestStatus.FAILED
279
+ raise
280
+
281
+ result.status = status
282
+ result.steps = sub_test_results
283
+ return result
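
Both testers now follow the same localization pattern: the constructor takes an optional `report_config`, stores its `language` (defaulting to `zh-CN`), preloads both locale sub-trees, and `_get_text` falls back to returning the key itself when a translation is missing. A minimal, illustrative usage (the calling runner is not part of this diff):

```python
# Illustrative only -- how a runner might construct the localized testers.
report_config = {"language": "en-US"}  # omit or pass None to keep the zh-CN default

accessibility_test = WebAccessibilityTest(report_config=report_config)
button_test = PageButtonTest(report_config=report_config)

# _get_text degrades gracefully: unknown keys come back verbatim instead of raising.
print(accessibility_test._get_text("accessibility_check"))  # -> "Accessibility Check"
print(accessibility_test._get_text("no_such_key"))          # -> "no_such_key"
```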
webqa_agent/testers/case_gen/graph.py CHANGED
@@ -97,11 +97,13 @@ async def plan_test_cases(state: MainGraphState) -> Dict[str, List[Dict[str, Any
97
  business_objectives = state.get("business_objectives", "No specific business objectives provided.")
98
  completed_cases = state.get("completed_cases")
99
 
 
100
  system_prompt = get_test_case_planning_system_prompt(
101
  business_objectives=business_objectives,
102
  completed_cases=completed_cases,
103
  reflection_history=state.get("reflection_history"),
104
  remaining_objectives=state.get("remaining_objectives"),
 
105
  )
106
 
107
  # Use explicit template for planning to include element attributes
@@ -300,12 +302,14 @@ async def reflect_and_replan(state: MainGraphState) -> dict:
300
  logging.debug(f"Reflection analysis enhanced with {len(page_content_summary)} interactive elements")
301
 
302
  # ไฝฟ็”จๆ–ฐ็š„ๅๆ€ๆ็คบ่ฏๅ‡ฝๆ•ฐ๏ผŒไผ ๅ…ฅpage_content_summary
 
303
  system_prompt, user_prompt = get_reflection_prompt(
304
  business_objectives=state.get("business_objectives"),
305
  current_plan=state["test_cases"],
306
  completed_cases=state["completed_cases"],
307
  page_structure=page_structure,
308
  page_content_summary=page_content_summary,
 
309
  )
310
 
311
  logging.info("Reflection and Replanning analysis - Sending request to LLM...")
@@ -380,7 +384,11 @@ async def execute_single_case(state: MainGraphState) -> dict:
380
  ui_tester_instance = state["ui_tester_instance"]
381
  case_name = case.get("name")
382
 
383
- with Display.display(f"ๆ™บ่ƒฝๅŠŸ่ƒฝๆต‹่ฏ• - {case_name}"):
 
 
 
 
384
  # === ๅผ€ๅง‹่ทŸ่ธชcaseๆ•ฐๆฎ ===
385
  # ไฝฟ็”จstart_caseๆฅๅŒๆ—ถ่ฎพ็ฝฎๅ็งฐๅ’Œๅผ€ๅง‹ๆ•ฐๆฎ่ทŸ่ธช
386
  ui_tester_instance.start_case(case_name, case)
 
97
  business_objectives = state.get("business_objectives", "No specific business objectives provided.")
98
  completed_cases = state.get("completed_cases")
99
 
100
+ language = state.get('language', 'zh-CN')
101
  system_prompt = get_test_case_planning_system_prompt(
102
  business_objectives=business_objectives,
103
  completed_cases=completed_cases,
104
  reflection_history=state.get("reflection_history"),
105
  remaining_objectives=state.get("remaining_objectives"),
106
+ language=language,
107
  )
108
 
109
  # Use explicit template for planning to include element attributes
 
302
  logging.debug(f"Reflection analysis enhanced with {len(page_content_summary)} interactive elements")
303
 
304
  # ไฝฟ็”จๆ–ฐ็š„ๅๆ€ๆ็คบ่ฏๅ‡ฝๆ•ฐ๏ผŒไผ ๅ…ฅpage_content_summary
305
+ language = state.get('language', 'zh-CN')
306
  system_prompt, user_prompt = get_reflection_prompt(
307
  business_objectives=state.get("business_objectives"),
308
  current_plan=state["test_cases"],
309
  completed_cases=state["completed_cases"],
310
  page_structure=page_structure,
311
  page_content_summary=page_content_summary,
312
+ language=language,
313
  )
314
 
315
  logging.info("Reflection and Replanning analysis - Sending request to LLM...")
 
384
  ui_tester_instance = state["ui_tester_instance"]
385
  case_name = case.get("name")
386
 
387
+ language = state.get('language', 'zh-CN')
388
+ logging.debug(f"Execute case language: {language}")
389
+ default_text = 'ๆ™บ่ƒฝๅŠŸ่ƒฝๆต‹่ฏ•' if language == 'zh-CN' else 'AI Function Test'
390
+
391
+ with Display.display(f"{default_text} - {case_name}"):
392
  # === ๅผ€ๅง‹่ทŸ่ธชcaseๆ•ฐๆฎ ===
393
  # ไฝฟ็”จstart_caseๆฅๅŒๆ—ถ่ฎพ็ฝฎๅ็งฐๅ’Œๅผ€ๅง‹ๆ•ฐๆฎ่ทŸ่ธช
394
  ui_tester_instance.start_case(case_name, case)
webqa_agent/testers/case_gen/prompts/planning_prompts.py CHANGED
@@ -3,18 +3,22 @@
3
  import json
4
 
5
 
6
- def get_shared_test_design_standards() -> str:
7
  """Get shared test case design standards for reuse in plan and reflect modules.
8
 
 
 
 
9
  Returns:
10
  String containing complete test case design standards
11
  """
12
- return """## Enhanced Test Case Design Standards
 
13
 
14
  ### Domain-Aware Test Case Structure Requirements
15
  Each test case must include these standardized components with enhanced business context:
16
 
17
- - **`name`**: ็ฎ€ๆด็›ด่ง‚็š„ไธญๆ–‡ๆต‹่ฏ•ๅ็งฐ๏ผŒๅๆ˜ ไธšๅŠกๅœบๆ™ฏๅ’Œๆต‹่ฏ•็›ฎ็š„
18
  - **`objective`**: Clear statement linking the test to specific business requirements and domain context
19
  - **`test_category`**: Enhanced classification including domain-specific categories (Ecommerce_Functional, Banking_Security, Healthcare_Compliance, etc.)
20
  - **`priority`**: Test priority level based on comprehensive impact assessment (Critical, High, Medium, Low):
@@ -174,12 +178,14 @@ def get_test_case_planning_system_prompt(
174
  completed_cases: list = None,
175
  reflection_history: list = None,
176
  remaining_objectives: str = None,
 
177
  ) -> str:
178
  """Generate system prompt for test case planning.
179
 
180
  Args:
181
  business_objectives: Business objectives
182
  completed_cases: Completed test cases (for replanning)
 
183
  reflection_history: Reflection history (for replanning)
184
  remaining_objectives: Remaining objectives (for replanning)
185
 
@@ -380,7 +386,7 @@ Leverage deeper business domain insights and execution learnings to generate ref
380
  3. The most valuable next action
381
  """
382
 
383
- shared_standards = get_shared_test_design_standards()
384
 
385
  system_prompt = f"""
386
  {role_and_objective}
@@ -534,14 +540,17 @@ Example 1:
534
  return user_prompt
535
 
536
 
537
- def get_reflection_system_prompt() -> str:
538
  """Generate system prompt for reflection and replanning (static part).
539
 
 
 
 
540
  Returns:
541
  Formatted system prompt containing role definition, decision framework, and output format
542
  """
543
-
544
- shared_standards = get_shared_test_design_standards()
545
 
546
  return f"""## Role
547
  You are a Senior QA Testing Professional responsible for dynamic test execution oversight with enhanced business domain awareness and contextual understanding. Your expertise includes business process analysis, domain-specific testing, user experience evaluation, and strategic decision-making based on comprehensive execution insights.
@@ -650,7 +659,7 @@ IF (len(completed_cases) < len(current_plan)
650
  }},
651
  "new_plan": [
652
  {{
653
- "name": "ไฟฎ่ฎขๅŽ็š„ๆต‹่ฏ•็”จไพ‹๏ผˆไธญๆ–‡ๅ‘ฝๅ๏ผ‰",
654
  "objective": "clear_test_purpose_aligned_with_remaining_business_objectives",
655
  "test_category": "enhanced_category_classification",
656
  "priority": "priority_based_on_business_impact",
@@ -803,11 +812,13 @@ def get_reflection_prompt(
803
  completed_cases: list,
804
  page_structure: str,
805
  page_content_summary: dict = None,
 
806
  ) -> tuple[str, str]:
807
  """Generate prompts for reflection and replanning (returns system and user prompt).
808
 
809
  Args:
810
  business_objectives: Overall business objectives
 
811
  current_plan: Current test plan
812
  completed_cases: Completed test cases
813
  page_structure: Current UI text structure
@@ -816,7 +827,7 @@ def get_reflection_prompt(
816
  Returns:
817
  tuple: (system_prompt, user_prompt)
818
  """
819
- system_prompt = get_reflection_system_prompt()
820
  user_prompt = get_reflection_user_prompt(
821
  business_objectives, current_plan, completed_cases, page_structure, page_content_summary
822
  )
 
3
  import json
4
 
5
 
6
+ def get_shared_test_design_standards(language: str = 'zh-CN') -> str:
7
  """Get shared test case design standards for reuse in plan and reflect modules.
8
 
9
+ Args:
10
+ language: Language for test case naming (zh-CN or en-US)
11
+
12
  Returns:
13
  String containing complete test case design standards
14
  """
15
+ name_language = 'ไธญๆ–‡' if language == 'zh-CN' else 'English'
16
+ return f"""## Enhanced Test Case Design Standards
17
 
18
  ### Domain-Aware Test Case Structure Requirements
19
  Each test case must include these standardized components with enhanced business context:
20
 
21
+ - **`name`**: ็ฎ€ๆด็›ด่ง‚็š„ๆต‹่ฏ•ๅ็งฐ๏ผŒๅๆ˜ ไธšๅŠกๅœบๆ™ฏๅ’Œๆต‹่ฏ•็›ฎ็š„ (ไฝฟ็”จ{name_language}ๅ‘ฝๅ)
22
  - **`objective`**: Clear statement linking the test to specific business requirements and domain context
23
  - **`test_category`**: Enhanced classification including domain-specific categories (Ecommerce_Functional, Banking_Security, Healthcare_Compliance, etc.)
24
  - **`priority`**: Test priority level based on comprehensive impact assessment (Critical, High, Medium, Low):
 
178
  completed_cases: list = None,
179
  reflection_history: list = None,
180
  remaining_objectives: str = None,
181
+ language: str = 'zh-CN',
182
  ) -> str:
183
  """Generate system prompt for test case planning.
184
 
185
  Args:
186
  business_objectives: Business objectives
187
  completed_cases: Completed test cases (for replanning)
188
+ language: Language for test case naming (zh-CN or en-US)
189
  reflection_history: Reflection history (for replanning)
190
  remaining_objectives: Remaining objectives (for replanning)
191
 
 
386
  3. The most valuable next action
387
  """
388
 
389
+ shared_standards = get_shared_test_design_standards(language)
390
 
391
  system_prompt = f"""
392
  {role_and_objective}
 
540
  return user_prompt
541
 
542
 
543
+ def get_reflection_system_prompt(language: str = 'zh-CN') -> str:
544
  """Generate system prompt for reflection and replanning (static part).
545
 
546
+ Args:
547
+ language: Language for test case naming (zh-CN or en-US)
548
+
549
  Returns:
550
  Formatted system prompt containing role definition, decision framework, and output format
551
  """
552
+ name_language = 'ไธญๆ–‡' if language == 'zh-CN' else 'English'
553
+ shared_standards = get_shared_test_design_standards(language)
554
 
555
  return f"""## Role
556
  You are a Senior QA Testing Professional responsible for dynamic test execution oversight with enhanced business domain awareness and contextual understanding. Your expertise includes business process analysis, domain-specific testing, user experience evaluation, and strategic decision-making based on comprehensive execution insights.
 
659
  }},
660
  "new_plan": [
661
  {{
662
+ "name": "ไฟฎ่ฎขๅŽ็š„ๆต‹่ฏ•็”จไพ‹๏ผˆ{name_language}ๅ‘ฝๅ๏ผ‰",
663
  "objective": "clear_test_purpose_aligned_with_remaining_business_objectives",
664
  "test_category": "enhanced_category_classification",
665
  "priority": "priority_based_on_business_impact",
 
812
  completed_cases: list,
813
  page_structure: str,
814
  page_content_summary: dict = None,
815
+ language: str = 'zh-CN',
816
  ) -> tuple[str, str]:
817
  """Generate prompts for reflection and replanning (returns system and user prompt).
818
 
819
  Args:
820
  business_objectives: Overall business objectives
821
+ language: Language for test case naming (zh-CN or en-US)
822
  current_plan: Current test plan
823
  completed_cases: Completed test cases
824
  page_structure: Current UI text structure
 
827
  Returns:
828
  tuple: (system_prompt, user_prompt)
829
  """
830
+ system_prompt = get_reflection_system_prompt(language)
831
  user_prompt = get_reflection_user_prompt(
832
  business_objectives, current_plan, completed_cases, page_structure, page_content_summary
833
  )
webqa_agent/testers/case_gen/state/schemas.py CHANGED
@@ -9,6 +9,7 @@ class MainGraphState(TypedDict):
9
 
10
  url: str
11
  business_objectives: Optional[str]
 
12
  cookies: Optional[str]
13
  test_cases: List[dict]
14
  # To manage the loop
 
9
 
10
  url: str
11
  business_objectives: Optional[str]
12
+ language: Optional[str]
13
  cookies: Optional[str]
14
  test_cases: List[dict]
15
  # To manage the loop
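
With `language` now part of `MainGraphState`, the flag flows from the graph state into every prompt builder, so test-case names in the generated plans follow the configured UI language. A condensed sketch of the call chain added above (variable names are illustrative, not a verbatim excerpt):

```python
# Condensed illustration of the new language plumbing in graph.py.
language = state.get("language", "zh-CN")  # stored on MainGraphState

planning_prompt = get_test_case_planning_system_prompt(
    business_objectives=business_objectives,
    completed_cases=completed_cases,
    language=language,  # switches test-case naming language in the shared design standards
)

system_prompt, user_prompt = get_reflection_prompt(
    business_objectives=state.get("business_objectives"),
    current_plan=state["test_cases"],
    completed_cases=state["completed_cases"],
    page_structure=page_structure,
    language=language,  # get_reflection_system_prompt(language) is invoked internally
)
```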
webqa_agent/testers/performance_tester.py CHANGED
@@ -5,11 +5,23 @@ import os
5
  import tempfile
6
 
7
  from webqa_agent.data import TestStatus
 
8
  from webqa_agent.data.test_structures import SubTestReport, SubTestResult
9
 
10
 
11
  class LighthouseMetricsTest:
12
 
 
 
 
 
 
 
 
 
 
 
 
13
  async def run(self, url: str, browser_config: dict = None, **kwargs) -> SubTestResult:
14
  """Run Lighthouse test on the given URL.
15
 
@@ -17,7 +29,7 @@ class LighthouseMetricsTest:
17
  url: The URL to test
18
  browser_config: Config of browser
19
  """
20
- test_name = f"Lighthouseๆฃ€ๆŸฅ_{browser_config['viewport']['width']}x{browser_config['viewport']['height']}"
21
  result = SubTestResult(name=test_name)
22
 
23
  try:
@@ -502,7 +514,7 @@ class LighthouseMetricsTest:
502
 
503
  # 7. Generate prioritized recommendations (including performance and SEO)
504
  prioritized_recommendations = self._generate_recommendations(
505
- core_vitals, opportunities, diagnostics, page_stats, seo_issues
506
  )
507
 
508
  # 8. Summarize scores for each category
@@ -523,17 +535,17 @@ class LighthouseMetricsTest:
523
 
524
  # 9.1 Four category scores
525
  score_str = "\n".join([f"- {k}: {v}" for k, v in category_scores.items()])
526
- simple_report = [{"title": "ๆ•ดไฝ“่ฏ„ๅˆ†", "issues": score_str}]
527
 
528
  # 9.2 Prioritized recommendations / potential issues
529
  if prioritized_recommendations:
530
  simple_report.append(
531
- {"title": "ๅพ…ๆ”น่ฟ›้—ฎ้ข˜", "issues": "\n".join([f"- {rec}" for rec in prioritized_recommendations])}
532
  )
533
 
534
  # 9.3 Key performance metrics
535
  perf_metrics_str = "\n".join([f"- {m['name']}: {m['display_value']}" for m in performance_metrics.values()])
536
- simple_report.append({"title": "ๆ€ง่ƒฝๆŒ‡ๆ ‡", "issues": perf_metrics_str})
537
 
538
  # 9.4 Return comprehensive results
539
  result = {
@@ -674,8 +686,8 @@ class LighthouseMetricsTest:
674
  return "moderate"
675
  else:
676
  return "minor"
677
- @staticmethod
678
- def _generate_recommendations(core_vitals, opportunities, diagnostics, page_stats, seo_issues):
679
  """Generate prioritized recommendations."""
680
  recommendations = []
681
 
@@ -689,7 +701,7 @@ class LighthouseMetricsTest:
689
  for vital_id, info in vitals_thresholds.items():
690
  if vital_id in core_vitals and not core_vitals[vital_id].get("passes_threshold"):
691
  recommendations.append(
692
- f"ๆ ธๅฟƒๆŒ‡ๆ ‡: ๆ”น่ฟ›{info['name']}๏ผˆๅฝ“ๅ‰ๅ€ผ๏ผš{core_vitals[vital_id].get('display_value')}, ็›ฎๆ ‡๏ผš< {info['threshold']}{info['unit']}๏ผ‰"
693
  )
694
 
695
  # 2. Time-saving based opportunity recommendations (maximum 3)
@@ -697,22 +709,22 @@ class LighthouseMetricsTest:
697
  for opportunity in sorted_opportunities[:3]:
698
  savings = ""
699
  if opportunity.get("savings_ms"):
700
- savings = f"๏ผˆๆฝœๅœจ่Š‚็œ๏ผš{opportunity.get('savings_ms')}ms๏ผ‰"
701
- recommendations.append(f"ๆ€ง่ƒฝไผ˜ๅŒ–: {opportunity.get('title')}{savings}")
702
 
703
  # 3. Page statistics based recommendations
704
  if page_stats.get("total_size_kb", 0) > 3000: # ่ถ…่ฟ‡3MB
705
- recommendations.append(f"่ต„ๆบไผ˜ๅŒ–: ๅ‡ๅฐ‘้กต้ขๆ€ปๅคงๅฐ๏ผˆๅฝ“ๅ‰๏ผš{page_stats.get('total_size_kb') / 1024:.1f}MB๏ผ‰")
706
 
707
  if page_stats.get("third_party_size_kb", 0) > 500: # ็ฌฌไธ‰ๆ–น่ต„ๆบ่ถ…่ฟ‡500KB
708
  recommendations.append(
709
- f"่ต„ๆบไผ˜ๅŒ–: ไผ˜ๅŒ–็ฌฌไธ‰ๆ–น่ต„ๆบไฝฟ็”จ๏ผˆๅฝ“ๅ‰๏ผš{page_stats.get('third_party_size_kb') / 1024:.1f}MB๏ผ‰"
710
  )
711
 
712
  # 4. Diagnostic issue recommendations (maximum 2 critical issues)
713
  critical_diagnostics = [d for d in diagnostics if d.get("impact") == "critical"]
714
  for diagnostic in critical_diagnostics:
715
- recommendations.append(f"ๆ€ง่ƒฝ่ฏŠๆ–ญ: {diagnostic.get('title')}")
716
 
717
  # 5. SEO issue recommendations (sorted by impact level)
718
  seo_issues_sorted = sorted(
@@ -722,16 +734,16 @@ class LighthouseMetricsTest:
722
  )
723
 
724
  for seo_issue in seo_issues_sorted[:5]: # ๆœ€ๅคšๆ˜พ็คบ5ไธชSEO้—ฎ้ข˜
725
- recommendation = f"SEO: {seo_issue.get('title')}"
726
 
727
  # ๆทปๅŠ ๅ…ทไฝ“็š„่ฏฆๆƒ…ไฟกๆฏ๏ผˆๅฆ‚ๆžœๆœ‰็š„่ฏ๏ผ‰
728
  details = seo_issue.get("details", {})
729
  if details.get("images_count"):
730
- recommendation += f" ({details['images_count']} ไธชๅ›พ็‰‡)"
731
  elif details.get("links_count"):
732
- recommendation += f" ({details['links_count']} ไธช้“พๆŽฅ)"
733
  elif details.get("problematic_links"):
734
- recommendation += f" (ไพ‹ๅฆ‚: {', '.join(details['problematic_links'][:2])})"
735
 
736
  recommendations.append(recommendation)
737
 
 
5
  import tempfile
6
 
7
  from webqa_agent.data import TestStatus
8
+ from webqa_agent.utils import i18n
9
  from webqa_agent.data.test_structures import SubTestReport, SubTestResult
10
 
11
 
12
  class LighthouseMetricsTest:
13
 
14
+ def __init__(self, report_config: dict = None):
15
+ self.language = report_config.get("language", "zh-CN") if report_config else "zh-CN"
16
+ self.localized_strings = {
17
+ 'zh-CN': i18n.get_lang_data('zh-CN').get('testers', {}).get('performance', {}),
18
+ 'en-US': i18n.get_lang_data('en-US').get('testers', {}).get('performance', {}),
19
+ }
20
+
21
+ def _get_text(self, key: str) -> str:
22
+ """Get localized text for the given key."""
23
+ return self.localized_strings.get(self.language, {}).get(key, key)
24
+
25
  async def run(self, url: str, browser_config: dict = None, **kwargs) -> SubTestResult:
26
  """Run Lighthouse test on the given URL.
27
 
 
29
  url: The URL to test
30
  browser_config: Config of browser
31
  """
32
+ test_name = f"Lighthouse_{browser_config['viewport']['width']}x{browser_config['viewport']['height']}"
33
  result = SubTestResult(name=test_name)
34
 
35
  try:
 
514
 
515
  # 7. Generate prioritized recommendations (including performance and SEO)
516
  prioritized_recommendations = self._generate_recommendations(
517
+ core_vitals, opportunities, diagnostics, page_stats, seo_issues, self.language
518
  )
519
 
520
  # 8. Summarize scores for each category
 
535
 
536
  # 9.1 Four category scores
537
  score_str = "\n".join([f"- {k}: {v}" for k, v in category_scores.items()])
538
+ simple_report = [{"title": self._get_text('overall_score'), "issues": score_str}]
539
 
540
  # 9.2 Prioritized recommendations / potential issues
541
  if prioritized_recommendations:
542
  simple_report.append(
543
+ {"title": self._get_text('issues_to_improve'), "issues": "\n".join([f"- {rec}" for rec in prioritized_recommendations])}
544
  )
545
 
546
  # 9.3 Key performance metrics
547
  perf_metrics_str = "\n".join([f"- {m['name']}: {m['display_value']}" for m in performance_metrics.values()])
548
+ simple_report.append({"title": self._get_text('performance_metrics'), "issues": perf_metrics_str})
549
 
550
  # 9.4 Return comprehensive results
551
  result = {
 
686
  return "moderate"
687
  else:
688
  return "minor"
689
+
690
+ def _generate_recommendations(self, core_vitals, opportunities, diagnostics, page_stats, seo_issues, language="zh-CN"):
691
  """Generate prioritized recommendations."""
692
  recommendations = []
693
 
 
701
  for vital_id, info in vitals_thresholds.items():
702
  if vital_id in core_vitals and not core_vitals[vital_id].get("passes_threshold"):
703
  recommendations.append(
704
+ f"{self._get_text('core_metrics')}: {self._get_text('improve')}{info['name']}๏ผˆ{self._get_text('current_value')}๏ผš{core_vitals[vital_id].get('display_value')}, {self._get_text('target')}๏ผš< {info['threshold']}{info['unit']}๏ผ‰"
705
  )
706
 
707
  # 2. Time-saving based opportunity recommendations (maximum 3)
 
709
  for opportunity in sorted_opportunities[:3]:
710
  savings = ""
711
  if opportunity.get("savings_ms"):
712
+ savings = f"๏ผˆ{self._get_text('potential_savings')}๏ผš{opportunity.get('savings_ms')}ms๏ผ‰"
713
+ recommendations.append(f"{self._get_text('performance_optimization')}: {opportunity.get('title')}{savings}")
714
 
715
  # 3. Page statistics based recommendations
716
  if page_stats.get("total_size_kb", 0) > 3000:  # exceeds 3MB
717
+ recommendations.append(f"{self._get_text('resource_optimization')}: {self._get_text('reduce_total_size')}๏ผˆ{self._get_text('current')}๏ผš{page_stats.get('total_size_kb') / 1024:.1f}MB๏ผ‰")
718
 
719
  if page_stats.get("third_party_size_kb", 0) > 500:  # third-party resources exceed 500KB
720
  recommendations.append(
721
+ f"{self._get_text('resource_optimization')}: {self._get_text('optimize_third_party')}๏ผˆ{self._get_text('current')}๏ผš{page_stats.get('third_party_size_kb') / 1024:.1f}MB๏ผ‰"
722
  )
723
 
724
  # 4. Diagnostic issue recommendations (maximum 2 critical issues)
725
  critical_diagnostics = [d for d in diagnostics if d.get("impact") == "critical"]
726
  for diagnostic in critical_diagnostics:
727
+ recommendations.append(f"{self._get_text('performance_diagnosis')}: {diagnostic.get('title')}")
728
 
729
  # 5. SEO issue recommendations (sorted by impact level)
730
  seo_issues_sorted = sorted(
 
734
  )
735
 
736
  for seo_issue in seo_issues_sorted[:5]:  # show at most 5 SEO issues
737
+ recommendation = f"{self._get_text('seo')}: {seo_issue.get('title')}"
738
 
739
  # add specific details (if available)
740
  details = seo_issue.get("details", {})
741
  if details.get("images_count"):
742
+ recommendation += f" ({details['images_count']} {self._get_text('images')})"
743
  elif details.get("links_count"):
744
+ recommendation += f" ({details['links_count']} {self._get_text('links')})"
745
  elif details.get("problematic_links"):
746
+ recommendation += f" ({self._get_text('example')}: {', '.join(details['problematic_links'][:2])})"
747
 
748
  recommendations.append(recommendation)
749
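For context on the `_get_text` pattern introduced above: the tester pulls its UI strings from per-language dictionaries exposed by `webqa_agent.utils.i18n` (added later in this commit). A minimal sketch of that lookup, assuming the bundled JSON language files contain a `testers.performance` section with the keys referenced above:

```python
# Illustrative sketch only; key names are taken from the calls above.
from webqa_agent.utils import i18n

language = "en-US"  # normally taken from report_config.get("language", "zh-CN")
strings = i18n.get_lang_data(language).get("testers", {}).get("performance", {})

# Mirrors _get_text: fall back to the key itself when no translation exists.
print(strings.get("overall_score", "overall_score"))
```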
 
webqa_agent/testers/ux_tester.py CHANGED
@@ -20,6 +20,7 @@ from webqa_agent.llm.llm_api import LLMAPI
20
  from webqa_agent.llm.prompt import LLMPrompt
21
  from webqa_agent.utils import Display
22
  from webqa_agent.utils.log_icon import icon
 
23
 
24
  try:
25
  from PIL import Image, ImageDraw
@@ -30,10 +31,19 @@ except Exception:
30
 
31
  class PageTextTest:
32
 
33
- def __init__(self, llm_config: dict, user_cases: List[str] = None):
34
  self.llm_config = llm_config
35
  self.user_cases = user_cases or LLMPrompt.TEXT_USER_CASES
36
  self.llm = LLMAPI(self.llm_config)
 
 
 
 
 
 
 
 
 
37
 
38
  async def get_iframe_content(self, frame):
39
  # get iframe content
@@ -46,10 +56,10 @@ class PageTextTest:
46
  async def run(self, page: Page) -> SubTestResult:
47
  """Runs a test to check the text content of a web page and identifies
48
  any issues based on predefined user cases."""
49
- result = SubTestResult(name='Text Check')
50
  logging.info(f"{icon['running']} Running Sub Test: {result.name}")
51
 
52
- with Display.display('UX Test - ' + result.name):
53
  try:
54
  # create an ActionHandler for screenshots
55
  action_handler = ActionHandler()
@@ -61,7 +71,7 @@ class PageTextTest:
61
  if is_blank:
62
  logging.error('page is blank, no visible content')
63
  result.status = TestStatus.FAILED
64
- result.messages = {'page': 'Page is blank, no visible content'}
65
  return result
66
 
67
  logging.debug('page is not blank, start crawling page content')
@@ -89,10 +99,10 @@ class PageTextTest:
89
  issues = self.format_issues_to_markdown(test_page_content)
90
  else:
91
  result.status = TestStatus.PASSED
92
- issues = 'No issues found'
93
  result.report.append(
94
  SubTestReport(
95
- title="Text Check",
96
  issues=issues,
97
  )
98
  )
@@ -107,33 +117,31 @@ class PageTextTest:
107
 
108
  return result
109
 
110
- @staticmethod
111
- def _build_prompt(page_text: str, user_case: str) -> str:
112
- """ๆž„ๅปบLLMๆ็คบ."""
113
- return f"""ไปปๅŠกๆ่ฟฐ๏ผšๆ นๆฎๆไพ›็š„็ฝ‘้กตๅ†…ๅฎน็”จๆˆท็”จไพ‹๏ผŒๆฃ€ๆŸฅๆ˜ฏๅฆๅญ˜ๅœจไปปไฝ•้”™ๅˆซๅญ—๏ผŒๆˆ–่€…่‹ฑๆ–‡่ฏญๆณ•้”™่ฏฏใ€‚ๅฆ‚ๆžœๅ‘็Žฐ้”™่ฏฏ๏ผŒ่ฏทๆŒ‰็…งๆŒ‡ๅฎš็š„JSONๆ ผๅผ่พ“ๅ‡บ็ป“ๆžœใ€‚
114
- ่พ“ๅ…ฅไฟกๆฏ๏ผš
115
- - ็ฝ‘้กตๅ†…ๅฎน๏ผš${page_text}
116
- - ็”จๆˆท็”จไพ‹๏ผš${user_case}
117
- ่พ“ๅ‡บ่ฆๆฑ‚๏ผš
118
- - ๅฆ‚ๆžœๆฒกๆœ‰ๅ‘็Žฐ้”™่ฏฏ๏ผŒ่ฏทๅช่พ“ๅ‡บ None ๏ผŒไธ่ฆๅŒ…ๅซไปปไฝ•่งฃ้‡Šใ€‚
119
- - ๅฆ‚ๆžœๅ‘็Žฐไบ†้”™่ฏฏ๏ผŒ่ฏทไฝฟ็”จไปฅไธ‹JSONๆ ผๅผ่พ“ๅ‡บ๏ผš
120
  {{
121
  "error": [
122
  {{
123
- "location": "้”™่ฏฏไฝ็ฝฎๆ่ฟฐ",
124
- "current": "ๅฝ“ๅ‰้”™่ฏฏๅ†…ๅฎน",
125
- "suggested": "ๅปบ่ฎฎไฟฎๆ”นๅ†…ๅฎน",
126
- "type": "้”™่ฏฏ็ฑปๅž‹"
127
  }}
128
  ],
129
- "reason": "ๆ€ปไฝ“้—ฎ้ข˜ๅŽŸๅ› ่ฏดๆ˜Ž"
130
  }}
131
  """
132
 
133
- @staticmethod
134
- def format_issues_to_markdown(issues_content: str) -> str:
135
  # Format issues to markdown
136
- if not issues_content or issues_content == 'No issues found':
137
  return issues_content
138
 
139
  try:
@@ -150,29 +158,30 @@ class PageTextTest:
150
  reason_summary = data['reason']
151
 
152
  if not errors:
153
- return 'No issues found'
154
 
 
155
  if reason_summary:
156
- markdown_content = f"**Overall problem:** {reason_summary}\n\n"
157
 
158
  if isinstance(errors, list):
159
  for i, error_item in enumerate(errors, 1):
160
  if isinstance(error_item, dict):
161
- location = error_item.get('location', 'Unknown location')
162
  current = error_item.get('current', '')
163
  suggested = error_item.get('suggested', '')
164
- error_type = error_item.get('type', 'Unknown type')
165
 
166
- markdown_content += f"### {i}. Issue details\n\n"
167
- markdown_content += f"**Location:** {location}\n\n"
168
- markdown_content += f"**Erroneous content:** `{current}`\n\n"
169
- markdown_content += f"**Suggested fix:** `{suggested}`\n\n"
170
- markdown_content += f"**Error type:** {error_type}\n\n"
171
  else:
172
- markdown_content += f"### {i}. Issue details\n\n"
173
- markdown_content += f"**Erroneous content:** {error_item}\n\n"
174
  else:
175
- markdown_content += f"**Erroneous content:** {errors}\n\n"
176
 
177
  return markdown_content
178
  else:
@@ -184,10 +193,19 @@ class PageTextTest:
184
 
185
  class PageContentTest:
186
 
187
- def __init__(self, llm_config: dict, user_cases: List[str] = None):
188
  self.llm_config = llm_config
189
  self.user_cases = user_cases or LLMPrompt.CONTENT_USER_CASES
190
  self.llm = LLMAPI(self.llm_config)
 
 
 
 
 
 
 
 
 
191
 
192
  async def run(self, page: Page, **kwargs) -> List[SubTestResult]:
193
  """run page content tests and return two separate SubTestResults
@@ -199,8 +217,8 @@ class PageContentTest:
199
  List of SubTestResult containing layout test and image test results
200
  """
201
  # create two independent test results
202
- layout_result = SubTestResult(name='Web Content Check')
203
- # image_result = SubTestResult(name='Web Element Check')
204
 
205
  logging.info(f"{icon['running']} Running Sub Tests: {layout_result.name}")
206
 
@@ -210,10 +228,8 @@ class PageContentTest:
210
  logging.debug(f'id_map: {id_map}')
211
  await dp.remove_marker()
212
 
213
- # separate the user cases
214
  layout_case = self.user_cases[0]
215
- image_case = self.user_cases[1]
216
-
217
 
218
  try:
219
  if not hasattr(self.llm, '_client') or self.llm._client is None:
@@ -228,13 +244,12 @@ class PageContentTest:
228
 
229
  page_img = True
230
 
231
- with Display.display('UX Test - Layout Check'):
232
  # run the layout check
233
  await self._run_single_test(layout_result, layout_case, id_map, browser_screenshot, page_img)
234
  logging.info(f"{icon['check']} Sub Tests Completed: {layout_result.name}")
235
 
236
- # with Display.display('UX Test - Element Check'):
237
- # # run the element check
238
  # try:
239
  # await self._run_single_test(image_result, image_case, id_map, browser_screenshot, page_img)
240
  # logging.info(f"{icon['check']} Sub Tests Completed: {image_result.name}")
@@ -257,17 +272,17 @@ class PageContentTest:
257
  overall_status = TestStatus.PASSED
258
 
259
  prompt = self._build_prompt(user_case, id_map, len(browser_screenshot))
260
- logging.debug(f'{result.name} test {user_case[:4]} prompt: {prompt}')
261
- logging.info(f"Vision model: evaluating use case '{user_case[:4]}'...")
262
  test_page_content = await self._get_llm_response(prompt, page_img, browser_screenshot)
263
 
264
  # parse LLM response
265
  summary_text = None
266
  issues_list = []
267
- issues_text = 'No issues found'  # initialize with default value
268
  case_status = TestStatus.PASSED
269
 
270
- logging.debug(f"LLM response for user case '{user_case[:4]}...': {test_page_content}")
271
 
272
  if test_page_content and str(test_page_content).strip():
273
  try:
@@ -282,7 +297,7 @@ class PageContentTest:
282
  if isinstance(parsed, dict) and parsed.get('status') == 'no_issues':
283
  # No issues found - this is the expected case
284
  case_status = TestStatus.PASSED
285
- issues_text = 'No issues found'
286
  logging.debug(f"LLM confirmed no issues found: {parsed.get('message', 'No issues detected')}")
287
 
288
  # Check if it's the "issues found" format (array)
@@ -410,29 +425,21 @@ class PageContentTest:
410
  else:
411
  # no valid content from LLM, treat as no issues found
412
  case_status = TestStatus.PASSED
413
- issues_text = 'No issues found'
414
  logging.debug(f'LLM returned no content, treating as PASSED')
415
 
416
- result.report.append(SubTestReport(title="Content Check", issues=issues_text))
417
  # aggregate overall status: any WARNING -> WARNING; else PASSED
418
  if case_status == TestStatus.WARNING and overall_status != TestStatus.WARNING:
419
  overall_status = TestStatus.WARNING
420
 
421
  result.status = overall_status
422
 
423
- @staticmethod
424
- def _build_prompt(user_case: str, id_map: dict, screenshot_count: int = 0) -> str:
425
- # determine the check type
426
- is_layout_check = 'Layout Check' in user_case
427
- is_missing_element_check = 'Element Check' in user_case
428
- is_text_check = 'Text Typography' in user_case
429
-
430
  # ๆž„ๅปบ็ป“ๆž„ๅŒ–็š„DOM/CSSไฟกๆฏๆ‘˜่ฆ
431
  structured_info = ''
432
 
433
- # only the layout check includes style info; the missing-element check relies on pure vision
434
- if is_layout_check and id_map:
435
- # provide basic element info for the model to reference, without pre-marking issues
436
  key_elements = []
437
 
438
  for element_id, info in id_map.items():
@@ -607,110 +614,110 @@ class PageContentTest:
607
  - When DOM signals indicate potential issues, verify visually and provide precise coordinates
608
  """
609
 
610
- elif is_missing_element_check:
611
- # provide basic image-element info for the missing-element check
612
- image_elements_info = ''
613
- if id_map:
614
- image_elements = []
615
- for element_id, info in id_map.items():
616
- if not isinstance(info, dict):
617
- continue
618
-
619
- tag = info.get('tagName', '')
620
- if tag in ['img', 'svg']:
621
- viewport = info.get('viewport', {})
622
- styles = info.get('styles', {}) or {}
623
-
624
- img_info = {
625
- 'id': element_id,
626
- 'tag': tag,
627
- 'position': f"({viewport.get('x', 0):.0f}, {viewport.get('y', 0):.0f})",
628
- 'size': f"{viewport.get('width', 0):.0f}ร—{viewport.get('height', 0):.0f}",
629
- 'src': info.get('src', 'N/A'),
630
- 'alt': info.get('alt', 'N/A')
631
- }
632
-
633
- # ๆฃ€ๆŸฅๅฏ่ƒฝ็š„ๅ ไฝๅ›พไฟกๅท
634
- placeholder_signals = []
635
- bg_image = styles.get('backgroundImage', '')
636
- if 'placeholder' in str(img_info['src']).lower() or 'placeholder' in bg_image.lower():
637
- placeholder_signals.append('PLACEHOLDER_SRC')
638
- if bg_image and bg_image != 'none':
639
- placeholder_signals.append('HAS_BACKGROUND')
640
- if viewport.get('width', 0) == viewport.get('height', 0): # ๆญฃๆ–นๅฝขๅฏ่ƒฝๆ˜ฏๅ ไฝๅ›พ
641
- placeholder_signals.append('SQUARE_ASPECT')
642
-
643
- if placeholder_signals:
644
- img_info['signals'] = placeholder_signals
645
-
646
- image_elements.append(img_info)
647
-
648
- if image_elements:
649
- image_elements_info = f'\nImage Elements Found: {len(image_elements)}\n'
650
- for img in image_elements[:10]:  # limit the number displayed
651
- desc = f"- {img['tag']}@{img['position']} [{img['size']}]"
652
- if img['src'] != 'N/A':
653
- desc += f" src=\"{img['src'][:50]}{'...' if len(img['src']) > 50 else ''}\""
654
- if img.get('signals'):
655
- desc += f" [โš ๏ธ {','.join(img['signals'])}]"
656
- image_elements_info += f'\n{desc}'
657
- else:
658
- image_elements_info = '\nNo image elements detected in DOM.'
659
-
660
- logging.debug(f'image_elements_info: {image_elements_info}')
661
- return f"""## Missing Image Element Analysis Task
662
- **Input**: Visual analysis with DOM context - {screenshot_count} screenshots (index 0-{screenshot_count-1})
663
-
664
- **Objective**: {user_case}
665
-
666
- **Image Reference**: {image_elements_info}
667
-
668
- ### Output Requirements
669
- {LLMPrompt.OUTPUT_FORMAT}
670
-
671
- **Rules**:
672
- - Focus on visual evidence, use DOM signals as supporting hints
673
- - Pay special attention to elements marked with โš ๏ธ placeholder signals
674
- - For gray blocks or obvious placeholders, identify them as missing content
675
- - If unsure about rendering vs loading state, provide evidence-based judgment
676
- - For multiple issues in one screenshot, create separate objects for each
677
- - If no issues found, output strictly: None (no explanation needed)
678
-
679
- **Requirements**:
680
- - Coordinates must be pixel-precise [x1,y1,x2,y2] based on visual observation
681
- - Clearly identify what type of content appears to be missing
682
- - Fix suggestions must be actionable and specific
683
- """
684
-
685
- elif is_text_check:
686
- return f"""## Text Typography Analysis Task
687
- **Input**: Visual analysis with DOM context - {screenshot_count} screenshots (index 0-{screenshot_count-1})
688
-
689
- **Objective**: {user_case}
690
-
691
- ### Output Requirements
692
- {LLMPrompt.OUTPUT_FORMAT}
693
-
694
- **Requirements**:
695
- - Coordinates must be pixel-precise [x1,y1,x2,y2] based on visual observation
696
- - Clearly describe the specific style or layout inconsistency observed
697
- - Suggestions must be actionable and specific (e.g., "Increase line-height from 1.1 to 1.4 for better readability")
698
- """
699
- else:
700
  # default case: handle unknown check types
701
- return f"""## General Content Analysis Task
702
- **Input**: Visual analysis with DOM context - {screenshot_count} screenshots (index 0-{screenshot_count-1})
703
 
704
- **Objective**: {user_case}
705
 
706
- ### Output Requirements
707
- {LLMPrompt.OUTPUT_FORMAT}
708
 
709
- **Requirements**:
710
- - Coordinates must be pixel-precise [x1,y1,x2,y2] based on visual observation
711
- - Clearly describe any issues found
712
- - Suggestions must be actionable and specific
713
- """
714
 
715
  async def _get_llm_response(self, prompt: str, page_img: bool, browser_screenshot=None):
716
  if page_img and browser_screenshot:
 
20
  from webqa_agent.llm.prompt import LLMPrompt
21
  from webqa_agent.utils import Display
22
  from webqa_agent.utils.log_icon import icon
23
+ from webqa_agent.utils import i18n
24
 
25
  try:
26
  from PIL import Image, ImageDraw
 
31
 
32
  class PageTextTest:
33
 
34
+ def __init__(self, llm_config: dict, user_cases: List[str] = None, report_config: dict = None):
35
  self.llm_config = llm_config
36
  self.user_cases = user_cases or LLMPrompt.TEXT_USER_CASES
37
  self.llm = LLMAPI(self.llm_config)
38
+ self.language = report_config["language"] if report_config else "zh-CN"
39
+ self.localized_strings = {
40
+ 'zh-CN': i18n.get_lang_data('zh-CN').get('testers', {}).get('ux', {}),
41
+ 'en-US': i18n.get_lang_data('en-US').get('testers', {}).get('ux', {}),
42
+ }
43
+
44
+ def _get_text(self, key: str) -> str:
45
+ """Get localized text for the given key."""
46
+ return self.localized_strings.get(self.language, {}).get(key, key)
47
 
48
  async def get_iframe_content(self, frame):
49
  # get iframe content
 
56
  async def run(self, page: Page) -> SubTestResult:
57
  """Runs a test to check the text content of a web page and identifies
58
  any issues based on predefined user cases."""
59
+ result = SubTestResult(name=self._get_text('text_check_name'))
60
  logging.info(f"{icon['running']} Running Sub Test: {result.name}")
61
 
62
+ with Display.display(self._get_text('ux_test_display') + result.name):
63
  try:
64
  # create an ActionHandler for screenshots
65
  action_handler = ActionHandler()
 
71
  if is_blank:
72
  logging.error('page is blank, no visible content')
73
  result.status = TestStatus.FAILED
74
+ result.messages = {'page': self._get_text('page_blank_error')}
75
  return result
76
 
77
  logging.debug('page is not blank, start crawling page content')
 
99
  issues = self.format_issues_to_markdown(test_page_content)
100
  else:
101
  result.status = TestStatus.PASSED
102
+ issues = self._get_text('no_issues_found')
103
  result.report.append(
104
  SubTestReport(
105
+ title=self._get_text('report_title'),
106
  issues=issues,
107
  )
108
  )
 
117
 
118
  return result
119
 
120
+ def _build_prompt(self, page_text: str, user_case: str) -> str:
121
+ """Builds the LLM prompt in English."""
122
+ return f"""Task description: Based on the provided web page content and user cases, check for any typos or English grammar errors. If errors are found, output the results in the specified JSON format.
123
+ Input information:
124
+ - Web content: ${page_text}
125
+ - User case: ${user_case}
126
+ Output requirements:
127
+ - If no errors are found, output only None, do not include any explanations.
128
+ - If errors are found, please output in the following JSON format:
 
129
  {{
130
  "error": [
131
  {{
132
+ "location": "Description of error location",
133
+ "current": "Current erroneous content",
134
+ "suggested": "Suggested modification",
135
+ "type": "Error type"
136
  }}
137
  ],
138
+ "reason": "Overall problem description"
139
  }}
140
  """
141
 
142
+ def format_issues_to_markdown(self, issues_content: str) -> str:
 
143
  # Format issues to markdown
144
+ if not issues_content or issues_content == self._get_text('no_issues_found'):
145
  return issues_content
146
 
147
  try:
 
158
  reason_summary = data['reason']
159
 
160
  if not errors:
161
+ return self._get_text('no_issues_found')
162
 
163
+ markdown_content = ''
164
  if reason_summary:
165
+ markdown_content += f"{self._get_text('overall_problem')}{reason_summary}\n\n"
166
 
167
  if isinstance(errors, list):
168
  for i, error_item in enumerate(errors, 1):
169
  if isinstance(error_item, dict):
170
+ location = error_item.get('location', self._get_text('unknown_location'))
171
  current = error_item.get('current', '')
172
  suggested = error_item.get('suggested', '')
173
+ error_type = error_item.get('type', self._get_text('unknown_type'))
174
 
175
+ markdown_content += f"{self._get_text('issue_details')}".format(i)
176
+ markdown_content += f"{self._get_text('location')}{location}\n\n"
177
+ markdown_content += f"{self._get_text('error_content')}`{current}`\n\n"
178
+ markdown_content += f"{self._get_text('suggested_fix')}`{suggested}`\n\n"
179
+ markdown_content += f"{self._get_text('error_type')}{error_type}\n\n"
180
  else:
181
+ markdown_content += f"{self._get_text('issue_details')}".format(i)
182
+ markdown_content += f"{self._get_text('error_content')}{error_item}\n\n"
183
  else:
184
+ markdown_content += f"{self._get_text('error_content')}{errors}\n\n"
185
 
186
  return markdown_content
187
  else:
 
193
 
194
  class PageContentTest:
195
 
196
+ def __init__(self, llm_config: dict, user_cases: List[str] = None, report_config: dict = None):
197
  self.llm_config = llm_config
198
  self.user_cases = user_cases or LLMPrompt.CONTENT_USER_CASES
199
  self.llm = LLMAPI(self.llm_config)
200
+ self.language = report_config["language"] if report_config else "zh-CN"
201
+ self.localized_strings = {
202
+ 'zh-CN': i18n.get_lang_data('zh-CN').get('testers', {}).get('ux', {}),
203
+ 'en-US': i18n.get_lang_data('en-US').get('testers', {}).get('ux', {}),
204
+ }
205
+
206
+ def _get_text(self, key: str) -> str:
207
+ """Get localized text for the given key."""
208
+ return self.localized_strings.get(self.language, {}).get(key, key)
209
 
210
  async def run(self, page: Page, **kwargs) -> List[SubTestResult]:
211
  """run page content tests and return two separate SubTestResults
 
217
  List of SubTestResult containing layout test and image test results
218
  """
219
  # create two independent test results
220
+ layout_result = SubTestResult(name=self._get_text('layout_check_name'))
221
+ # image_result = SubTestResult(name=_['element_check_name'])
222
 
223
  logging.info(f"{icon['running']} Running Sub Tests: {layout_result.name}")
224
 
 
228
  logging.debug(f'id_map: {id_map}')
229
  await dp.remove_marker()
230
 
231
+ # LAYOUT
232
  layout_case = self.user_cases[0]
 
 
233
 
234
  try:
235
  if not hasattr(self.llm, '_client') or self.llm._client is None:
 
244
 
245
  page_img = True
246
 
247
+ with Display.display(self._get_text('ux_test_display') + self._get_text('layout_case')):
248
  # run the layout check
249
  await self._run_single_test(layout_result, layout_case, id_map, browser_screenshot, page_img)
250
  logging.info(f"{icon['check']} Sub Tests Completed: {layout_result.name}")
251
 
252
+ # with Display.display(_['ux_test_display'] + _['element_check_name']):
 
253
  # try:
254
  # await self._run_single_test(image_result, image_case, id_map, browser_screenshot, page_img)
255
  # logging.info(f"{icon['check']} Sub Tests Completed: {image_result.name}")
 
272
  overall_status = TestStatus.PASSED
273
 
274
  prompt = self._build_prompt(user_case, id_map, len(browser_screenshot))
275
+ logging.debug(f'{result.name} test, prompt: {prompt}')
276
+ logging.info(f"Vision model: evaluating use case '{result.name}'...")
277
  test_page_content = await self._get_llm_response(prompt, page_img, browser_screenshot)
278
 
279
  # parse LLM response
280
  summary_text = None
281
  issues_list = []
282
+ issues_text = self._get_text('no_issues_found') # initialize with default value
283
  case_status = TestStatus.PASSED
284
 
285
+ logging.debug(f"LLM response for user case '{result.name}...': {test_page_content}")
286
 
287
  if test_page_content and str(test_page_content).strip():
288
  try:
 
297
  if isinstance(parsed, dict) and parsed.get('status') == 'no_issues':
298
  # No issues found - this is the expected case
299
  case_status = TestStatus.PASSED
300
+ issues_text = self._get_text('no_issues_found')
301
  logging.debug(f"LLM confirmed no issues found: {parsed.get('message', 'No issues detected')}")
302
 
303
  # Check if it's the "issues found" format (array)
 
425
  else:
426
  # no valid content from LLM, treat as no issues found
427
  case_status = TestStatus.PASSED
428
+ issues_text = self._get_text('no_issues_found')
429
  logging.debug(f'LLM returned no content, treating as PASSED')
430
 
431
+ result.report.append(SubTestReport(title=self._get_text('report_title'), issues=issues_text))
432
  # aggregate overall status: any WARNING -> WARNING; else PASSED
433
  if case_status == TestStatus.WARNING and overall_status != TestStatus.WARNING:
434
  overall_status = TestStatus.WARNING
435
 
436
  result.status = overall_status
437
 
438
+ def _build_prompt(self, user_case: str, id_map: dict, screenshot_count: int = 0) -> str:
 
 
 
 
 
 
439
  # build a structured DOM/CSS info summary
440
  structured_info = ''
441
 
442
+ if id_map:
 
 
443
  key_elements = []
444
 
445
  for element_id, info in id_map.items():
 
614
  - When DOM signals indicate potential issues, verify visually and provide precise coordinates
615
  """
616
 
617
+ # elif is_missing_element_check:
618
+ # # provide basic image-element info for the missing-element check
619
+ # image_elements_info = ''
620
+ # if id_map:
621
+ # image_elements = []
622
+ # for element_id, info in id_map.items():
623
+ # if not isinstance(info, dict):
624
+ # continue
625
+
626
+ # tag = info.get('tagName', '')
627
+ # if tag in ['img', 'svg']:
628
+ # viewport = info.get('viewport', {})
629
+ # styles = info.get('styles', {}) or {}
630
+
631
+ # img_info = {
632
+ # 'id': element_id,
633
+ # 'tag': tag,
634
+ # 'position': f"({viewport.get('x', 0):.0f}, {viewport.get('y', 0):.0f})",
635
+ # 'size': f"{viewport.get('width', 0):.0f}ร—{viewport.get('height', 0):.0f}",
636
+ # 'src': info.get('src', 'N/A'),
637
+ # 'alt': info.get('alt', 'N/A')
638
+ # }
639
+
640
+ # # check for possible placeholder-image signals
641
+ # placeholder_signals = []
642
+ # bg_image = styles.get('backgroundImage', '')
643
+ # if 'placeholder' in str(img_info['src']).lower() or 'placeholder' in bg_image.lower():
644
+ # placeholder_signals.append('PLACEHOLDER_SRC')
645
+ # if bg_image and bg_image != 'none':
646
+ # placeholder_signals.append('HAS_BACKGROUND')
647
+ # if viewport.get('width', 0) == viewport.get('height', 0):  # a square aspect may be a placeholder
648
+ # placeholder_signals.append('SQUARE_ASPECT')
649
+
650
+ # if placeholder_signals:
651
+ # img_info['signals'] = placeholder_signals
652
+
653
+ # image_elements.append(img_info)
654
+
655
+ # if image_elements:
656
+ # image_elements_info = f'\nImage Elements Found: {len(image_elements)}\n'
657
+ # for img in image_elements[:10]:  # limit the number displayed
658
+ # desc = f"- {img['tag']}@{img['position']} [{img['size']}]"
659
+ # if img['src'] != 'N/A':
660
+ # desc += f" src=\"{img['src'][:50]}{'...' if len(img['src']) > 50 else ''}\""
661
+ # if img.get('signals'):
662
+ # desc += f" [โš ๏ธ {','.join(img['signals'])}]"
663
+ # image_elements_info += f'\n{desc}'
664
+ # else:
665
+ # image_elements_info = '\nNo image elements detected in DOM.'
666
+
667
+ # logging.debug(f'image_elements_info: {image_elements_info}')
668
+ # return f"""## Missing Image Element Analysis Task
669
+ # **Input**: Visual analysis with DOM context - {screenshot_count} screenshots (index 0-{screenshot_count-1})
670
+
671
+ # **Objective**: {user_case}
672
+
673
+ # **Image Reference**: {image_elements_info}
674
+
675
+ # ### Output Requirements
676
+ # {LLMPrompt.OUTPUT_FORMAT}
677
+
678
+ # **Rules**:
679
+ # - Focus on visual evidence, use DOM signals as supporting hints
680
+ # - Pay special attention to elements marked with โš ๏ธ placeholder signals
681
+ # - For gray blocks or obvious placeholders, identify them as missing content
682
+ # - If unsure about rendering vs loading state, provide evidence-based judgment
683
+ # - For multiple issues in one screenshot, create separate objects for each
684
+ # - If no issues found, output strictly: None (no explanation needed)
685
+
686
+ # **Requirements**:
687
+ # - Coordinates must be pixel-precise [x1,y1,x2,y2] based on visual observation
688
+ # - Clearly identify what type of content appears to be missing
689
+ # - Fix suggestions must be actionable and specific
690
+ # """
691
+
692
+ # elif is_text_check:
693
+ # return f"""## Text Typography Analysis Task
694
+ # **Input**: Visual analysis with DOM context - {screenshot_count} screenshots (index 0-{screenshot_count-1})
695
+
696
+ # **Objective**: {user_case}
697
+
698
+ # ### Output Requirements
699
+ # {LLMPrompt.OUTPUT_FORMAT}
700
+
701
+ # **Requirements**:
702
+ # - Coordinates must be pixel-precise [x1,y1,x2,y2] based on visual observation
703
+ # - Clearly describe the specific style or layout inconsistency observed
704
+ # - Suggestions must be actionable and specific (e.g., "Increase line-height from 1.1 to 1.4 for better readability")
705
+ # """
706
+ # else:
707
  # default case: handle unknown check types
708
+ # return f"""## General Content Analysis Task
709
+ # **Input**: Visual analysis with DOM context - {screenshot_count} screenshots (index 0-{screenshot_count-1})
710
 
711
+ # **Objective**: {user_case}
712
 
713
+ # ### Output Requirements
714
+ # {LLMPrompt.OUTPUT_FORMAT}
715
 
716
+ # **Requirements**:
717
+ # - Coordinates must be pixel-precise [x1,y1,x2,y2] based on visual observation
718
+ # - Clearly describe any issues found
719
+ # - Suggestions must be actionable and specific
720
+ # """
721
 
722
  async def _get_llm_response(self, prompt: str, page_img: bool, browser_screenshot=None):
723
  if page_img and browser_screenshot:
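Both UX testers above now take an optional `report_config`; note that `PageTextTest.__init__` indexes `report_config["language"]` directly, so the key must be present whenever a config dict is passed. A hypothetical wiring example (the `llm_config` keys are placeholders, not the documented LLMAPI schema):

```python
# Illustrative sketch only; adjust llm_config to whatever LLMAPI actually expects.
from webqa_agent.testers.ux_tester import PageTextTest, PageContentTest

llm_config = {"model": "placeholder-model", "api_key": "placeholder-key"}
report_config = {"language": "en-US"}  # "zh-CN" remains the default

text_test = PageTextTest(llm_config, report_config=report_config)
content_test = PageContentTest(llm_config, report_config=report_config)
# Later, inside an async context: await text_test.run(page); await content_test.run(page)
```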
webqa_agent/utils/i18n.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ from functools import lru_cache
4
+ from pathlib import Path
5
+ from typing import Any
6
+
7
+
8
+ I18N_DIR = Path(__file__).resolve().parents[1] / 'static' / 'i18n'
9
+
10
+
11
+ @lru_cache(maxsize=8)
12
+ def _load_lang(lang: str) -> dict[str, Any]:
13
+ try:
14
+ file_path = I18N_DIR / f'{lang}.json'
15
+ if not file_path.exists():
16
+ # fallback to zh-CN if specific lang not found
17
+ logging.warning(f'i18n file not found for {lang}, fallback to zh-CN')
18
+ file_path = I18N_DIR / 'zh-CN.json'
19
+ return json.loads(file_path.read_text(encoding='utf-8'))
20
+ except Exception as e:
21
+ logging.error(f'Failed to load i18n file for {lang}: {e}')
22
+ return {}
23
+
24
+
25
+ def t(lang: str, key: str, default: str | None = None) -> str:
26
+ """Translate a namespaced key like 'aggregator.summary_and_advice'."""
27
+ data = _load_lang(lang)
28
+ node: Any = data
29
+ for part in key.split('.'):
30
+ if isinstance(node, dict) and part in node:
31
+ node = node[part]
32
+ else:
33
+ return default if default is not None else key
34
+ return node if isinstance(node, str) else default if default is not None else key
35
+
36
+
37
+ def get_lang_data(lang: str) -> dict[str, Any]:
38
+ """Return the full language dict (read-only)."""
39
+ return _load_lang(lang).copy()
40
+
41
+
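A short usage sketch of the helper added above; the dotted key is the one from the docstring, and the translated value itself is assumed to exist in the language files:

```python
# Illustrative sketch only: dotted-key lookup with graceful fallback.
from webqa_agent.utils import i18n

# Returns the translation if the key resolves to a string in en-US.json,
# otherwise the supplied default, otherwise the key itself.
text = i18n.t('en-US', 'aggregator.summary_and_advice', default='Summary and advice')
print(text)
```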
webqa_agent/utils/task_display_util.py CHANGED
@@ -11,6 +11,7 @@ from typing import Optional, List
11
  from collections import deque
12
 
13
  from webqa_agent.utils.get_log import COLORS
 
14
 
15
 
16
  @dataclass
@@ -51,14 +52,14 @@ class Display:
51
  display = None
52
 
53
  @classmethod
54
- def init(cls):
55
- cls.display = _Display()
56
 
57
 
58
  class _Display:
59
  SPINNER = ['โ ‹', 'โ ™', 'โ น', 'โ ธ', 'โ ผ', 'โ ด', 'โ ฆ', 'โ ง', 'โ ‡', 'โ ']
60
 
61
- def __init__(self, refresh_interval: float = 0.1):
62
  self.logger = logging.getLogger()
63
  self.logger_handlers = []
64
  self.running: List[TaskInfo] = []
@@ -71,6 +72,11 @@ class _Display:
71
  self.captured_output = StringIO()
72
  self._log_queue = deque(maxlen=1000)
73
  self.num_log = 5 # TODO: Make it configurable
 
 
 
 
 
74
 
75
  for hdr in self.logger.handlers:
76
  if isinstance(hdr, logging.StreamHandler) and hdr.name == "stream":
@@ -79,6 +85,10 @@ class _Display:
79
 
80
  self.log_pattern = re.compile(r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d+)(\s+)(\w+)(\s+\[.*?]\s+\[.*?]\s+-\s+)(.*)")
81
 
 
 
 
 
82
  def __call__(self, name: str):
83
  return _Tracker(self, name)
84
 
@@ -113,7 +123,7 @@ class _Display:
113
  out = sys.stdout
114
  out.write("\x1b[H\x1b[J")
115
  with self._lock:
116
- out.write("๐ŸŽ‰ Completed tasks\n")
117
  for t in self.completed:
118
  if t.end is None:
119
  continue
@@ -124,7 +134,7 @@ class _Display:
124
 
125
  out.write("โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•\n")
126
 
127
- out.write("๐Ÿš€ Running tasks\n")
128
  now = time.monotonic()
129
  for t in self.running:
130
  elapsed = now - t.start
@@ -156,7 +166,7 @@ class _Display:
156
  # captured = self.captured_output.getvalue()
157
  # if captured:
158
  # out.write(captured)
159
- out.write("๐Ÿ“Š Task execution statistics panel\n")
160
  out.write("โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•\n")
161
 
162
  total = len(self.completed)
@@ -167,13 +177,13 @@ class _Display:
167
  # out.write(f"๐Ÿ”ข ๆ€ปไปปๅŠกๆ•ฐ๏ผš{total}\n")
168
  # out.write(f"โœ… ๆˆๅŠŸไปปๅŠก๏ผš{success}\n")
169
  # out.write(f"โŒ ๅคฑ่ดฅไปปๅŠก๏ผš{failed}\n")
170
- out.write(f"โฑ๏ธ Total time: {total_time:.2f}s\n")
171
 
172
  if failed > 0:
173
- out.write("โš ๏ธ Error task list:\n")
174
  for t in self.completed:
175
  if t.error:
176
- out.write(f" โŒ {t.name} error message: {t.error}\n")
177
 
178
  # out.write("โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•\n")
179
  # out.write("๐ŸŽฏ Done๏ผ\n")
 
11
  from collections import deque
12
 
13
  from webqa_agent.utils.get_log import COLORS
14
+ from webqa_agent.utils import i18n
15
 
16
 
17
  @dataclass
 
52
  display = None
53
 
54
  @classmethod
55
+ def init(cls, language: str = 'zh-CN'):
56
+ cls.display = _Display(language=language)
57
 
58
 
59
  class _Display:
60
  SPINNER = ['โ ‹', 'โ ™', 'โ น', 'โ ธ', 'โ ผ', 'โ ด', 'โ ฆ', 'โ ง', 'โ ‡', 'โ ']
61
 
62
+ def __init__(self, refresh_interval: float = 0.1, language: str = 'zh-CN'):
63
  self.logger = logging.getLogger()
64
  self.logger_handlers = []
65
  self.running: List[TaskInfo] = []
 
72
  self.captured_output = StringIO()
73
  self._log_queue = deque(maxlen=1000)
74
  self.num_log = 5 # TODO: Make it configurable
75
+ self.language = language
76
+ self.localized_strings = {
77
+ "zh-CN": i18n.get_lang_data('zh-CN').get('display', {}),
78
+ "en-US": i18n.get_lang_data('en-US').get('display', {}),
79
+ }
80
 
81
  for hdr in self.logger.handlers:
82
  if isinstance(hdr, logging.StreamHandler) and hdr.name == "stream":
 
85
 
86
  self.log_pattern = re.compile(r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d+)(\s+)(\w+)(\s+\[.*?]\s+\[.*?]\s+-\s+)(.*)")
87
 
88
+ def _get_text(self, key: str) -> str:
89
+ """Get localized text for the given key."""
90
+ return self.localized_strings.get(self.language, {}).get(key, key)
91
+
92
  def __call__(self, name: str):
93
  return _Tracker(self, name)
94
 
 
123
  out = sys.stdout
124
  out.write("\x1b[H\x1b[J")
125
  with self._lock:
126
+ out.write(self._get_text("completed_tasks") + "\n")
127
  for t in self.completed:
128
  if t.end is None:
129
  continue
 
134
 
135
  out.write("โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•\n")
136
 
137
+ out.write(self._get_text("running_tasks") + "\n")
138
  now = time.monotonic()
139
  for t in self.running:
140
  elapsed = now - t.start
 
166
  # captured = self.captured_output.getvalue()
167
  # if captured:
168
  # out.write(captured)
169
+ out.write(self._get_text("task_execution_summary") + "\n")
170
  out.write("โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•\n")
171
 
172
  total = len(self.completed)
 
177
  # out.write(f"๐Ÿ”ข ๆ€ปไปปๅŠกๆ•ฐ๏ผš{total}\n")
178
  # out.write(f"โœ… ๆˆๅŠŸไปปๅŠก๏ผš{success}\n")
179
  # out.write(f"โŒ ๅคฑ่ดฅไปปๅŠก๏ผš{failed}\n")
180
+ out.write(f"{self._get_text('total_time')}๏ผš{total_time:.2f}s\n")
181
 
182
  if failed > 0:
183
+ out.write(self._get_text("error_tasks") + "\n")
184
  for t in self.completed:
185
  if t.error:
186
+ out.write(f" โŒ {t.name} {self._get_text('error_message')}๏ผš{t.error}\n")
187
 
188
  # out.write("โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•\n")
189
  # out.write("๐ŸŽฏ Done๏ผ\n")
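With these changes the console display is initialized per language; a minimal sketch of the new entry point (the task name is made up for illustration):

```python
# Illustrative sketch only: initialize the localized display and track one task.
from webqa_agent.utils import Display

Display.init(language='en-US')  # 'zh-CN' remains the default
with Display.display('UX Test - Text Check'):
    pass  # tracked work would run here
```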