Animality committed on
Commit
6eb9915
·
verified ·
1 Parent(s): 754fbce

Upload folder using huggingface_hub

.dockerignore ADDED
@@ -0,0 +1,2 @@
+ data
+ tmp
.env.example ADDED
@@ -0,0 +1,44 @@
+ OPENAI_ENDPOINT=https://api.openai.com/v1
+ OPENAI_API_KEY=
+
+ ANTHROPIC_API_KEY=
+ ANTHROPIC_ENDPOINT=https://api.anthropic.com
+
+ GOOGLE_API_KEY=
+
+ AZURE_OPENAI_ENDPOINT=
+ AZURE_OPENAI_API_KEY=
+ AZURE_OPENAI_API_VERSION=2025-01-01-preview
+
+ DEEPSEEK_ENDPOINT=https://api.deepseek.com
+ DEEPSEEK_API_KEY=
+
+ MISTRAL_API_KEY=
+ MISTRAL_ENDPOINT=https://api.mistral.ai/v1
+
+ OLLAMA_ENDPOINT=http://localhost:11434
+
+ # Set to false to disable anonymized telemetry
+ ANONYMIZED_TELEMETRY=true
+
+ # LogLevel: Set to debug to enable verbose logging, set to result to get results only. Available: result | debug | info
+ BROWSER_USE_LOGGING_LEVEL=info
+
+ # Chrome settings
+ CHROME_PATH=
+ CHROME_USER_DATA=
+ CHROME_DEBUGGING_PORT=9222
+ CHROME_DEBUGGING_HOST=localhost
+ # Set to true to keep browser open between AI tasks
+ CHROME_PERSISTENT_SESSION=false
+
+ # Display settings
+ # Format: WIDTHxHEIGHTxDEPTH
+ RESOLUTION=1920x1080x24
+ # Width in pixels
+ RESOLUTION_WIDTH=1920
+ # Height in pixels
+ RESOLUTION_HEIGHT=1080
+
+ # VNC settings
+ VNC_PASSWORD=youvncpassword
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ assets/examples/test.png filter=lfs diff=lfs merge=lfs -text
.github/workflows/update_space.yml ADDED
@@ -0,0 +1,28 @@
+ name: Run Python script
+
+ on:
+   push:
+     branches:
+       - automality
+
+ jobs:
+   build:
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Checkout
+         uses: actions/checkout@v2
+
+       - name: Set up Python
+         uses: actions/setup-python@v2
+         with:
+           python-version: '3.9'
+
+       - name: Install Gradio
+         run: python -m pip install gradio
+
+       - name: Log in to Hugging Face
+         run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+       - name: Deploy to Spaces
+         run: gradio deploy
.gitignore ADDED
@@ -0,0 +1,189 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+ test_env/
+ myenv
+
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ .idea/
+ temp
+ tmp
+
+
+ .DS_Store
+
+ private_example.py
+ private_example
+
+ browser_cookies.json
+ cookies.json
+ AgentHistory.json
+ cv_04_24.pdf
+ AgentHistoryList.json
+ *.gif
+
+ # For Sharing (.pem files)
+ .gradio/
+
+ # For Docker
+ data/
+
+ # For Config Files (Current Settings)
+ .config.pkl
.vscode/settings.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "python.analysis.typeCheckingMode": "basic",
+   "[python]": {
+     "editor.defaultFormatter": "charliermarsh.ruff",
+     "editor.formatOnSave": true,
+     "editor.codeActionsOnSave": {
+       "source.fixAll.ruff": "explicit",
+       "source.organizeImports.ruff": "explicit"
+     }
+   }
+ }
Dockerfile ADDED
@@ -0,0 +1,86 @@
+ FROM python:3.11-slim
+
+ # Install system dependencies
+ RUN apt-get update && apt-get install -y \
+     wget \
+     netcat-traditional \
+     gnupg \
+     curl \
+     unzip \
+     xvfb \
+     libgconf-2-4 \
+     libxss1 \
+     libnss3 \
+     libnspr4 \
+     libasound2 \
+     libatk1.0-0 \
+     libatk-bridge2.0-0 \
+     libcups2 \
+     libdbus-1-3 \
+     libdrm2 \
+     libgbm1 \
+     libgtk-3-0 \
+     libxcomposite1 \
+     libxdamage1 \
+     libxfixes3 \
+     libxrandr2 \
+     xdg-utils \
+     fonts-liberation \
+     dbus \
+     xauth \
+     xvfb \
+     x11vnc \
+     tigervnc-tools \
+     supervisor \
+     net-tools \
+     procps \
+     git \
+     python3-numpy \
+     fontconfig \
+     fonts-dejavu \
+     fonts-dejavu-core \
+     fonts-dejavu-extra \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Install noVNC
+ RUN git clone https://github.com/novnc/noVNC.git /opt/novnc \
+     && git clone https://github.com/novnc/websockify /opt/novnc/utils/websockify \
+     && ln -s /opt/novnc/vnc.html /opt/novnc/index.html
+
+ # Set platform for ARM64 compatibility
+ ARG TARGETPLATFORM=linux/amd64
+
+ # Set up working directory
+ WORKDIR /app
+
+ # Copy requirements and install Python dependencies
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Install Playwright and browsers with system dependencies
+ ENV PLAYWRIGHT_BROWSERS_PATH=/ms-playwright
+ RUN playwright install --with-deps chromium
+ RUN playwright install-deps
+
+ # Copy the application code
+ COPY . .
+
+ # Set environment variables
+ ENV PYTHONUNBUFFERED=1
+ ENV BROWSER_USE_LOGGING_LEVEL=info
+ ENV CHROME_PATH=/ms-playwright/chromium-*/chrome-linux/chrome
+ ENV ANONYMIZED_TELEMETRY=false
+ ENV DISPLAY=:99
+ ENV RESOLUTION=1920x1080x24
+ ENV VNC_PASSWORD=vncpassword
+ ENV CHROME_PERSISTENT_SESSION=true
+ ENV RESOLUTION_WIDTH=1920
+ ENV RESOLUTION_HEIGHT=1080
+
+ # Set up supervisor configuration
+ RUN mkdir -p /var/log/supervisor
+ COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
+ EXPOSE 7788 6080 5901
+
+ CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
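For a quick smoke test of this image without Compose, it can be built and run directly; a minimal sketch (the image tag is arbitrary, and some agent features may additionally need the capabilities granted in docker-compose.yml below):

```bash
# Build the image and publish the Gradio, noVNC, and VNC ports
docker build -t browser-use-webui .
docker run --rm -p 7788:7788 -p 6080:6080 -p 5901:5901 browser-use-webui
```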
Dockerfile.arm64 ADDED
@@ -0,0 +1,85 @@
+ FROM python:3.11-slim
+
+ # Install system dependencies
+ RUN apt-get update && apt-get install -y \
+     wget \
+     gnupg \
+     curl \
+     unzip \
+     xvfb \
+     libgconf-2-4 \
+     libxss1 \
+     libnss3 \
+     libnspr4 \
+     libasound2 \
+     libatk1.0-0 \
+     libatk-bridge2.0-0 \
+     libcups2 \
+     libdbus-1-3 \
+     libdrm2 \
+     libgbm1 \
+     libgtk-3-0 \
+     libxcomposite1 \
+     libxdamage1 \
+     libxfixes3 \
+     libxrandr2 \
+     xdg-utils \
+     fonts-liberation \
+     dbus \
+     xauth \
+     xvfb \
+     x11vnc \
+     tigervnc-tools \
+     supervisor \
+     net-tools \
+     procps \
+     git \
+     python3-numpy \
+     fontconfig \
+     fonts-dejavu \
+     fonts-dejavu-core \
+     fonts-dejavu-extra \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Install noVNC
+ RUN git clone https://github.com/novnc/noVNC.git /opt/novnc \
+     && git clone https://github.com/novnc/websockify /opt/novnc/utils/websockify \
+     && ln -s /opt/novnc/vnc.html /opt/novnc/index.html
+
+ # Set platform explicitly for ARM64
+ ARG TARGETPLATFORM=linux/arm64
+
+ # Set up working directory
+ WORKDIR /app
+
+ # Copy requirements and install Python dependencies
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Install Playwright and browsers with system dependencies optimized for ARM64
+ ENV PLAYWRIGHT_BROWSERS_PATH=/ms-playwright
+ RUN PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD=1 pip install playwright && \
+     playwright install --with-deps chromium
+
+ # Copy the application code
+ COPY . .
+
+ # Set environment variables
+ ENV PYTHONUNBUFFERED=1
+ ENV BROWSER_USE_LOGGING_LEVEL=info
+ ENV CHROME_PATH=/ms-playwright/chromium-*/chrome-linux/chrome
+ ENV ANONYMIZED_TELEMETRY=false
+ ENV DISPLAY=:99
+ ENV RESOLUTION=1920x1080x24
+ ENV VNC_PASSWORD=vncpassword
+ ENV CHROME_PERSISTENT_SESSION=true
+ ENV RESOLUTION_WIDTH=1920
+ ENV RESOLUTION_HEIGHT=1080
+
+ # Set up supervisor configuration
+ RUN mkdir -p /var/log/supervisor
+ COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
+ EXPOSE 7788 6080 5901
+
+ CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Browser Use Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md CHANGED
@@ -1,12 +1,233 @@
  ---
- title: Automality
- emoji: 👀
- colorFrom: green
- colorTo: purple
+ title: automality
+ app_file: webui.py
  sdk: gradio
- sdk_version: 5.21.0
- app_file: app.py
- pinned: false
+ sdk_version: 5.10.0
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ <img src="./assets/web-ui.png" alt="Browser Use Web UI" width="full"/>
+
+ <br/>
+
+ [![GitHub stars](https://img.shields.io/github/stars/browser-use/web-ui?style=social)](https://github.com/browser-use/web-ui/stargazers)
+ [![Discord](https://img.shields.io/discord/1303749220842340412?color=7289DA&label=Discord&logo=discord&logoColor=white)](https://link.browser-use.com/discord)
+ [![Documentation](https://img.shields.io/badge/Documentation-📕-blue)](https://docs.browser-use.com)
+ [![WarmShao](https://img.shields.io/twitter/follow/warmshao?style=social)](https://x.com/warmshao)
+
+ This project builds upon the foundation of [browser-use](https://github.com/browser-use/browser-use), which is designed to make websites accessible to AI agents.
+
+ We would like to officially thank [WarmShao](https://github.com/warmshao) for his contribution to this project.
+
+ **WebUI:** Built on Gradio, the WebUI supports most `browser-use` functionality. It is designed to be user-friendly and enables easy interaction with the browser agent.
+
+ **Expanded LLM Support:** We've integrated support for various Large Language Models (LLMs), including Google, OpenAI, Azure OpenAI, Anthropic, DeepSeek, and Ollama, and we plan to add support for even more models in the future.
+
+ **Custom Browser Support:** You can use your own browser with our tool, eliminating the need to re-login to sites or deal with other authentication challenges. This feature also supports high-definition screen recording.
+
+ **Persistent Browser Sessions:** You can choose to keep the browser window open between AI tasks, allowing you to see the complete history and state of AI interactions.
+
+ <video src="https://github.com/user-attachments/assets/56bc7080-f2e3-4367-af22-6bf2245ff6cb" controls="controls">Your browser does not support playing this video!</video>
+
+ ## Installation Guide
+
+ ### Prerequisites
+ - Python 3.11 or higher
+ - Git (for cloning the repository)
+
+ ### Option 1: Local Installation
+
+ Read the [quickstart guide](https://docs.browser-use.com/quickstart#prepare-the-environment) or follow the steps below to get started.
+
+ #### Step 1: Clone the Repository
+ ```bash
+ git clone https://github.com/browser-use/web-ui.git
+ cd web-ui
+ ```
+
+ #### Step 2: Set Up Python Environment
+ We recommend using [uv](https://docs.astral.sh/uv/) for managing the Python environment.
+
+ Using uv (recommended):
+ ```bash
+ uv venv --python 3.11
+ ```
+
+ Activate the virtual environment:
+ - Windows (Command Prompt):
+   ```cmd
+   .venv\Scripts\activate
+   ```
+ - Windows (PowerShell):
+   ```powershell
+   .\.venv\Scripts\Activate.ps1
+   ```
+ - macOS/Linux:
+   ```bash
+   source .venv/bin/activate
+   ```
+
+ #### Step 3: Install Dependencies
+ Install Python packages:
+ ```bash
+ uv pip install -r requirements.txt
+ ```
+
+ Install Playwright:
+ ```bash
+ playwright install
+ ```
+
+ #### Step 4: Configure Environment
+ 1. Create a copy of the example environment file:
+    - Windows (Command Prompt):
+      ```bash
+      copy .env.example .env
+      ```
+    - macOS/Linux/Windows (PowerShell):
+      ```bash
+      cp .env.example .env
+      ```
+ 2. Open `.env` in your preferred text editor and add your API keys and other settings.
+
+ ### Option 2: Docker Installation
+
+ #### Prerequisites
+ - Docker and Docker Compose installed
+   - [Docker Desktop](https://www.docker.com/products/docker-desktop/) (for Windows/macOS)
+   - [Docker Engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/) (for Linux)
+
+ #### Installation Steps
+ 1. Clone the repository:
+    ```bash
+    git clone https://github.com/browser-use/web-ui.git
+    cd web-ui
+    ```
+
+ 2. Create and configure the environment file:
+    - Windows (Command Prompt):
+      ```bash
+      copy .env.example .env
+      ```
+    - macOS/Linux/Windows (PowerShell):
+      ```bash
+      cp .env.example .env
+      ```
+    Edit `.env` with your preferred text editor and add your API keys.
+
+ 3. Run with Docker:
+    ```bash
+    # Build and start the container with default settings (browser closes after AI tasks)
+    docker compose up --build
+    ```
+    ```bash
+    # Or run with a persistent browser (browser stays open between AI tasks)
+    CHROME_PERSISTENT_SESSION=true docker compose up --build
+    ```
+
+ 4. Access the Application:
+    - Web Interface: Open `http://localhost:7788` in your browser
+    - VNC Viewer (for watching browser interactions): Open `http://localhost:6080/vnc.html`
+      - Default VNC password: "youvncpassword"
+      - Can be changed by setting `VNC_PASSWORD` in your `.env` file
+
+ ## Usage
+
+ ### Local Setup
+ 1. **Run the WebUI:**
+    After completing the installation steps above, start the application:
+    ```bash
+    python webui.py --ip 127.0.0.1 --port 7788
+    ```
+ 2. WebUI options (a combined invocation is shown below):
+    - `--ip`: The IP address to bind the WebUI to. Default is `127.0.0.1`.
+    - `--port`: The port to bind the WebUI to. Default is `7788`.
+    - `--theme`: The theme for the user interface. Default is `Ocean`.
+      - **Default**: The standard theme with a balanced design.
+      - **Soft**: A gentle, muted color scheme for a relaxed viewing experience.
+      - **Monochrome**: A grayscale theme with minimal color for simplicity and focus.
+      - **Glass**: A sleek, semi-transparent design for a modern appearance.
+      - **Origin**: A classic, retro-inspired theme for a nostalgic feel.
+      - **Citrus**: A vibrant, citrus-inspired palette with bright and fresh colors.
+      - **Ocean** (default): A blue, ocean-inspired theme providing a calming effect.
+    - `--dark-mode`: Enables dark mode for the user interface.
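+
+    For example, the options above can be combined in a single invocation (the chosen values here are illustrative):
+    ```bash
+    python webui.py --ip 0.0.0.0 --port 7788 --theme Glass --dark-mode
+    ```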
+ 3. **Access the WebUI:** Open your web browser and navigate to `http://127.0.0.1:7788`.
+ 4. **Using Your Own Browser (Optional):**
+    - Set `CHROME_PATH` to the executable path of your browser and `CHROME_USER_DATA` to the user data directory of your browser. Leave `CHROME_USER_DATA` empty if you want to use local user data.
+      - Windows
+        ```env
+        CHROME_PATH="C:\Program Files\Google\Chrome\Application\chrome.exe"
+        CHROME_USER_DATA="C:\Users\YourUsername\AppData\Local\Google\Chrome\User Data"
+        ```
+        > Note: Replace `YourUsername` with your actual Windows username for Windows systems.
+      - Mac
+        ```env
+        CHROME_PATH="/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
+        CHROME_USER_DATA="/Users/YourUsername/Library/Application Support/Google/Chrome"
+        ```
+    - Close all Chrome windows.
+    - Open the WebUI in a non-Chrome browser, such as Firefox or Edge. This is important because the persistent browser context will use the Chrome data when running the agent.
+    - Check the "Use Own Browser" option within the Browser Settings.
+ 5. **Keep Browser Open (Optional):**
+    - Set `CHROME_PERSISTENT_SESSION=true` in the `.env` file.
+
+ ### Docker Setup
+ 1. **Environment Variables:**
+    - All configuration is done through the `.env` file.
+    - Available environment variables:
+      ```
+      # LLM API Keys
+      OPENAI_API_KEY=your_key_here
+      ANTHROPIC_API_KEY=your_key_here
+      GOOGLE_API_KEY=your_key_here
+
+      # Browser Settings
+      CHROME_PERSISTENT_SESSION=true   # Set to true to keep browser open between AI tasks
+      RESOLUTION=1920x1080x24          # Custom resolution format: WIDTHxHEIGHTxDEPTH
+      RESOLUTION_WIDTH=1920            # Custom width in pixels
+      RESOLUTION_HEIGHT=1080           # Custom height in pixels
+
+      # VNC Settings
+      VNC_PASSWORD=your_vnc_password   # Optional, defaults to "vncpassword"
+      ```
+
+ 2. **Platform Support:**
+    - Supports both AMD64 and ARM64 architectures.
+    - For ARM64 systems (e.g., Apple Silicon Macs), the container will automatically use the appropriate image.
+
+ 3. **Browser Persistence Modes:**
+    - **Default Mode (CHROME_PERSISTENT_SESSION=false):**
+      - Browser opens and closes with each AI task
+      - Clean state for each interaction
+      - Lower resource usage
+
+    - **Persistent Mode (CHROME_PERSISTENT_SESSION=true):**
+      - Browser stays open between AI tasks
+      - Maintains history and state
+      - Allows viewing previous AI interactions
+      - Set in the `.env` file or via an environment variable when starting the container
+
+ 4. **Viewing Browser Interactions:**
+    - Access the noVNC viewer at `http://localhost:6080/vnc.html`
+    - Enter the VNC password (default: "vncpassword", or whatever you set in `VNC_PASSWORD`)
+    - Direct VNC access is available on port 5900 (mapped to container port 5901)
+    - You can now see all browser interactions in real time
+
+ 5. **Container Management:**
+    ```bash
+    # Start with persistent browser
+    CHROME_PERSISTENT_SESSION=true docker compose up -d
+
+    # Start with default mode (browser closes after tasks)
+    docker compose up -d
+
+    # View logs
+    docker compose logs -f
+
+    # Stop the container
+    docker compose down
+    ```
+
+ ## Changelog
+ - [x] **2025/01/26:** Thanks to @vvincent1234. Now browser-use-webui can combine with DeepSeek-r1 to engage in deep thinking!
+ - [x] **2025/01/10:** Thanks to @casistack. We now have a Docker setup option and also support keeping the browser open between tasks. [Video tutorial demo](https://github.com/browser-use/web-ui/issues/1#issuecomment-2582511750).
+ - [x] **2025/01/06:** Thanks to @richard-devbot. A new and well-designed WebUI is released. [Video tutorial demo](https://github.com/warmshao/browser-use-webui/issues/1#issuecomment-2573393113).
SECURITY.md ADDED
@@ -0,0 +1,19 @@
+ ## Reporting Security Issues
+
+ If you believe you have found a security vulnerability in browser-use, please report it through coordinated disclosure.
+
+ **Please do not report security vulnerabilities through the repository issues, discussions, or pull requests.**
+
+ Instead, please open a new [GitHub security advisory](https://github.com/browser-use/web-ui/security/advisories/new).
+
+ Please include as much of the information listed below as you can to help me better understand and resolve the issue:
+
+ * The type of issue (e.g., buffer overflow, SQL injection, or cross-site scripting)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+ This information will help me triage your report more quickly.
assets/examples/test.png ADDED

Git LFS Details

  • SHA256: 23e4fe8c9836cd35393315a3cca074dbd55a8645289ea337e3300269dda06900
  • Pointer size: 131 Bytes
  • Size of remote file: 423 kB
assets/web-ui.png ADDED
docker-compose.yml ADDED
@@ -0,0 +1,50 @@
+ services:
+   browser-use-webui:
+     platform: linux/amd64
+     build:
+       context: .
+       dockerfile: ${DOCKERFILE:-Dockerfile}
+       args:
+         TARGETPLATFORM: ${TARGETPLATFORM:-linux/amd64}
+     ports:
+       - "7788:7788"  # Gradio default port
+       - "6080:6080"  # noVNC web interface
+       - "5901:5901"  # VNC port
+       - "9222:9222"  # Chrome remote debugging port
+     environment:
+       - OPENAI_ENDPOINT=${OPENAI_ENDPOINT:-https://api.openai.com/v1}
+       - OPENAI_API_KEY=${OPENAI_API_KEY:-}
+       - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
+       - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
+       - AZURE_OPENAI_ENDPOINT=${AZURE_OPENAI_ENDPOINT:-}
+       - AZURE_OPENAI_API_KEY=${AZURE_OPENAI_API_KEY:-}
+       - DEEPSEEK_ENDPOINT=${DEEPSEEK_ENDPOINT:-https://api.deepseek.com}
+       - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY:-}
+       - BROWSER_USE_LOGGING_LEVEL=${BROWSER_USE_LOGGING_LEVEL:-info}
+       - ANONYMIZED_TELEMETRY=false
+       - CHROME_PATH=/usr/bin/google-chrome
+       - CHROME_USER_DATA=/app/data/chrome_data
+       - CHROME_PERSISTENT_SESSION=${CHROME_PERSISTENT_SESSION:-false}
+       - DISPLAY=:99
+       - PLAYWRIGHT_BROWSERS_PATH=/ms-playwright
+       - RESOLUTION=${RESOLUTION:-1920x1080x24}
+       - RESOLUTION_WIDTH=${RESOLUTION_WIDTH:-1920}
+       - RESOLUTION_HEIGHT=${RESOLUTION_HEIGHT:-1080}
+       - VNC_PASSWORD=${VNC_PASSWORD:-vncpassword}
+       - CHROME_DEBUGGING_PORT=9222
+       - CHROME_DEBUGGING_HOST=localhost
+     volumes:
+       - /tmp/.X11-unix:/tmp/.X11-unix
+     restart: unless-stopped
+     shm_size: '2gb'
+     cap_add:
+       - SYS_ADMIN
+     security_opt:
+       - seccomp=unconfined
+     tmpfs:
+       - /tmp
+     healthcheck:
+       test: ["CMD", "nc", "-z", "localhost", "5901"]
+       interval: 10s
+       timeout: 5s
+       retries: 3
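Because the `build` section reads `DOCKERFILE` and `TARGETPLATFORM` from the shell environment, the ARM64 variant above can in principle be selected without editing this file; an illustrative invocation (note the hard-coded `platform: linux/amd64` key may also need adjusting on ARM hosts):

```bash
# Build and start using the ARM64 Dockerfile instead of the default
DOCKERFILE=Dockerfile.arm64 TARGETPLATFORM=linux/arm64 docker compose up --build
```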
entrypoint.sh ADDED
@@ -0,0 +1,4 @@
+ #!/bin/bash
+
+ # Start supervisord in the foreground to properly manage child processes
+ exec /usr/bin/supervisord -n -c /etc/supervisor/conf.d/supervisord.conf
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ browser-use==0.1.29
+ pyperclip==1.9.0
+ gradio==5.10.0
+ json-repair
+ langchain-mistralai==0.2.4
src/__init__.py ADDED
File without changes
src/agent/__init__.py ADDED
File without changes
src/agent/custom_agent.py ADDED
@@ -0,0 +1,550 @@
+ import json
+ import logging
+ import pdb
+ import traceback
+ from typing import Optional, Type, List, Dict, Any, Callable
+ from PIL import Image, ImageDraw, ImageFont
+ import os
+ import base64
+ import io
+ import platform
+ from browser_use.agent.prompts import SystemPrompt, AgentMessagePrompt
+ from browser_use.agent.service import Agent
+ from browser_use.agent.views import (
+     ActionResult,
+     ActionModel,
+     AgentHistoryList,
+     AgentOutput,
+     AgentHistory,
+ )
+ from browser_use.browser.browser import Browser
+ from browser_use.browser.context import BrowserContext
+ from browser_use.browser.views import BrowserStateHistory
+ from browser_use.controller.service import Controller
+ from browser_use.telemetry.views import (
+     AgentEndTelemetryEvent,
+     AgentRunTelemetryEvent,
+     AgentStepTelemetryEvent,
+ )
+ from browser_use.utils import time_execution_async
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_core.messages import (
+     BaseMessage,
+ )
+ from json_repair import repair_json
+ from src.utils.agent_state import AgentState
+
+ from .custom_message_manager import CustomMessageManager
+ from .custom_views import CustomAgentOutput, CustomAgentStepInfo
+
+ logger = logging.getLogger(__name__)
+
+
+ class CustomAgent(Agent):
+     def __init__(
+         self,
+         task: str,
+         llm: BaseChatModel,
+         add_infos: str = "",
+         browser: Browser | None = None,
+         browser_context: BrowserContext | None = None,
+         controller: Controller = Controller(),
+         use_vision: bool = True,
+         save_conversation_path: Optional[str] = None,
+         max_failures: int = 5,
+         retry_delay: int = 10,
+         system_prompt_class: Type[SystemPrompt] = SystemPrompt,
+         agent_prompt_class: Type[AgentMessagePrompt] = AgentMessagePrompt,
+         max_input_tokens: int = 128000,
+         validate_output: bool = False,
+         include_attributes: list[str] = [
+             "title",
+             "type",
+             "name",
+             "role",
+             "tabindex",
+             "aria-label",
+             "placeholder",
+             "value",
+             "alt",
+             "aria-expanded",
+         ],
+         max_error_length: int = 400,
+         max_actions_per_step: int = 10,
+         tool_call_in_content: bool = True,
+         agent_state: AgentState = None,
+         initial_actions: Optional[List[Dict[str, Dict[str, Any]]]] = None,
+         # Cloud Callbacks
+         register_new_step_callback: Callable[['BrowserState', 'AgentOutput', int], None] | None = None,
+         register_done_callback: Callable[['AgentHistoryList'], None] | None = None,
+         tool_calling_method: Optional[str] = 'auto',
+     ):
+         super().__init__(
+             task=task,
+             llm=llm,
+             browser=browser,
+             browser_context=browser_context,
+             controller=controller,
+             use_vision=use_vision,
+             save_conversation_path=save_conversation_path,
+             max_failures=max_failures,
+             retry_delay=retry_delay,
+             system_prompt_class=system_prompt_class,
+             max_input_tokens=max_input_tokens,
+             validate_output=validate_output,
+             include_attributes=include_attributes,
+             max_error_length=max_error_length,
+             max_actions_per_step=max_actions_per_step,
+             tool_call_in_content=tool_call_in_content,
+             initial_actions=initial_actions,
+             register_new_step_callback=register_new_step_callback,
+             register_done_callback=register_done_callback,
+             tool_calling_method=tool_calling_method
+         )
+         if self.model_name in ["deepseek-reasoner"] or "deepseek-r1" in self.model_name:
+             # deepseek-reasoner does not support function calling
+             self.use_deepseek_r1 = True
+             # deepseek-reasoner only supports a 64000-token context
+             self.max_input_tokens = 64000
+         else:
+             self.use_deepseek_r1 = False
+
+         # record last actions
+         self._last_actions = None
+         # record extracted content
+         self.extracted_content = ""
+         # custom new info
+         self.add_infos = add_infos
+         # agent_state for Stop
+         self.agent_state = agent_state
+         self.agent_prompt_class = agent_prompt_class
+         self.message_manager = CustomMessageManager(
+             llm=self.llm,
+             task=self.task,
+             action_descriptions=self.controller.registry.get_prompt_description(),
+             system_prompt_class=self.system_prompt_class,
+             agent_prompt_class=agent_prompt_class,
+             max_input_tokens=self.max_input_tokens,
+             include_attributes=self.include_attributes,
+             max_error_length=self.max_error_length,
+             max_actions_per_step=self.max_actions_per_step
+         )
+
+     def _setup_action_models(self) -> None:
+         """Setup dynamic action models from controller's registry"""
+         # Get the dynamic action model from controller's registry
+         self.ActionModel = self.controller.registry.create_action_model()
+         # Create output model with the dynamic actions
+         self.AgentOutput = CustomAgentOutput.type_with_custom_actions(self.ActionModel)
+
+     def _log_response(self, response: CustomAgentOutput) -> None:
+         """Log the model's response"""
+         if "Success" in response.current_state.prev_action_evaluation:
+             emoji = "✅"
+         elif "Failed" in response.current_state.prev_action_evaluation:
+             emoji = "❌"
+         else:
+             emoji = "🤷"
+
+         logger.info(f"{emoji} Eval: {response.current_state.prev_action_evaluation}")
+         logger.info(f"🧠 New Memory: {response.current_state.important_contents}")
+         logger.info(f"⏳ Task Progress: \n{response.current_state.task_progress}")
+         logger.info(f"📋 Future Plans: \n{response.current_state.future_plans}")
+         logger.info(f"🤔 Thought: {response.current_state.thought}")
+         logger.info(f"🎯 Summary: {response.current_state.summary}")
+         for i, action in enumerate(response.action):
+             logger.info(
+                 f"🛠️ Action {i + 1}/{len(response.action)}: {action.model_dump_json(exclude_unset=True)}"
+             )
+
+     def update_step_info(
+         self, model_output: CustomAgentOutput, step_info: CustomAgentStepInfo = None
+     ):
+         """
+         update step info
+         """
+         if step_info is None:
+             return
+
+         step_info.step_number += 1
+         important_contents = model_output.current_state.important_contents
+         if (
+             important_contents
+             and "None" not in important_contents
+             and important_contents not in step_info.memory
+         ):
+             step_info.memory += important_contents + "\n"
+
+         task_progress = model_output.current_state.task_progress
+         if task_progress and "None" not in task_progress:
+             step_info.task_progress = task_progress
+
+         future_plans = model_output.current_state.future_plans
+         if future_plans and "None" not in future_plans:
+             step_info.future_plans = future_plans
+
+     @time_execution_async("--get_next_action")
+     async def get_next_action(self, input_messages: list[BaseMessage]) -> AgentOutput:
+         """Get next action from LLM based on current state"""
+         messages_to_process = (
+             self.message_manager.merge_successive_human_messages(input_messages)
+             if self.use_deepseek_r1
+             else input_messages
+         )
+
+         ai_message = self.llm.invoke(messages_to_process)
+         self.message_manager._add_message_with_tokens(ai_message)
+
+         if self.use_deepseek_r1:
+             logger.info("🤯 Start Deep Thinking: ")
+             logger.info(ai_message.reasoning_content)
+             logger.info("🤯 End Deep Thinking")
+
+         if isinstance(ai_message.content, list):
+             ai_content = ai_message.content[0]
+         else:
+             ai_content = ai_message.content
+
+         ai_content = ai_content.replace("```json", "").replace("```", "")
+         ai_content = repair_json(ai_content)
+         parsed_json = json.loads(ai_content)
+         parsed: AgentOutput = self.AgentOutput(**parsed_json)
+
+         if parsed is None:
+             logger.debug(ai_message.content)
+             raise ValueError('Could not parse response.')
+
+         # Limit actions to maximum allowed per step
+         parsed.action = parsed.action[: self.max_actions_per_step]
+         self._log_response(parsed)
+         self.n_steps += 1
+
+         return parsed
+
+     @time_execution_async("--step")
+     async def step(self, step_info: Optional[CustomAgentStepInfo] = None) -> None:
+         """Execute one step of the task"""
+         logger.info(f"\n📝 Step {self.n_steps}")
+         state = None
+         model_output = None
+         result: list[ActionResult] = []
+
+         try:
+             state = await self.browser_context.get_state(use_vision=self.use_vision)
+             self.message_manager.add_state_message(state, self._last_actions, self._last_result, step_info)
+             input_messages = self.message_manager.get_messages()
+             try:
+                 model_output = await self.get_next_action(input_messages)
+                 if self.register_new_step_callback:
+                     self.register_new_step_callback(state, model_output, self.n_steps)
+                 self.update_step_info(model_output, step_info)
+                 logger.info(f"🧠 All Memory: \n{step_info.memory}")
+                 self._save_conversation(input_messages, model_output)
+                 if self.model_name != "deepseek-reasoner":
+                     # remove prev message
+                     self.message_manager._remove_state_message_by_index(-1)
+             except Exception as e:
+                 # model call failed, remove last state message from history
+                 self.message_manager._remove_state_message_by_index(-1)
+                 raise e
+
+             actions: list[ActionModel] = model_output.action
+             result: list[ActionResult] = await self.controller.multi_act(
+                 actions, self.browser_context
+             )
+             if len(result) != len(actions):
+                 # Something changed mid-sequence; the LLM should be told about it
+                 for ri in range(len(result), len(actions)):
+                     result.append(ActionResult(extracted_content=None,
+                                                include_in_memory=True,
+                                                error=f"{actions[ri].model_dump_json(exclude_unset=True)} is Failed to execute. \
+                         Something new appeared after action {actions[len(result) - 1].model_dump_json(exclude_unset=True)}",
+                                                is_done=False))
+             if len(actions) == 0:
+                 # TODO: fix no action case
+                 result = [ActionResult(is_done=True, extracted_content=step_info.memory, include_in_memory=True)]
+             for ret_ in result:
+                 if "Extracted page" in ret_.extracted_content:
+                     # record every extracted page
+                     self.extracted_content += ret_.extracted_content
+             self._last_result = result
+             self._last_actions = actions
+             if len(result) > 0 and result[-1].is_done:
+                 if not self.extracted_content:
+                     self.extracted_content = step_info.memory
+                 result[-1].extracted_content = self.extracted_content
+                 logger.info(f"📄 Result: {result[-1].extracted_content}")
+
+             self.consecutive_failures = 0
+
+         except Exception as e:
+             result = await self._handle_step_error(e)
+             self._last_result = result
+
+         finally:
+             actions = [a.model_dump(exclude_unset=True) for a in model_output.action] if model_output else []
+             self.telemetry.capture(
+                 AgentStepTelemetryEvent(
+                     agent_id=self.agent_id,
+                     step=self.n_steps,
+                     actions=actions,
+                     consecutive_failures=self.consecutive_failures,
+                     step_error=[r.error for r in result if r.error] if result else ['No result'],
+                 )
+             )
+             if not result:
+                 return
+
+             if state:
+                 self._make_history_item(model_output, state, result)
+
+     async def run(self, max_steps: int = 100) -> AgentHistoryList:
+         """Execute the task with maximum number of steps"""
+         try:
+             self._log_agent_run()
+
+             # Execute initial actions if provided
+             if self.initial_actions:
+                 result = await self.controller.multi_act(self.initial_actions, self.browser_context, check_for_new_elements=False)
+                 self._last_result = result
+
+             step_info = CustomAgentStepInfo(
+                 task=self.task,
+                 add_infos=self.add_infos,
+                 step_number=1,
+                 max_steps=max_steps,
+                 memory="",
+                 task_progress="",
+                 future_plans=""
+             )
+
+             for step in range(max_steps):
+                 # 1) Check if stop requested
+                 if self.agent_state and self.agent_state.is_stop_requested():
+                     logger.info("🛑 Stop requested by user")
+                     self._create_stop_history_item()
+                     break
+
+                 # 2) Store last valid state before step
+                 if self.browser_context and self.agent_state:
+                     state = await self.browser_context.get_state(use_vision=self.use_vision)
+                     self.agent_state.set_last_valid_state(state)
+
+                 if self._too_many_failures():
+                     break
+
+                 # 3) Do the step
+                 await self.step(step_info)
+
+                 if self.history.is_done():
+                     if (
+                         self.validate_output and step < max_steps - 1
+                     ):  # if last step, we don't need to validate
+                         if not await self._validate_output():
+                             continue
+
+                     logger.info("✅ Task completed successfully")
+                     break
+             else:
+                 logger.info("❌ Failed to complete task in maximum steps")
+                 if not self.extracted_content:
+                     self.history.history[-1].result[-1].extracted_content = step_info.memory
+                 else:
+                     self.history.history[-1].result[-1].extracted_content = self.extracted_content
+
+             return self.history
+
+         finally:
+             self.telemetry.capture(
+                 AgentEndTelemetryEvent(
+                     agent_id=self.agent_id,
+                     success=self.history.is_done(),
+                     steps=self.n_steps,
+                     max_steps_reached=self.n_steps >= max_steps,
+                     errors=self.history.errors(),
+                 )
+             )
+
+             if not self.injected_browser_context:
+                 await self.browser_context.close()
+
+             if not self.injected_browser and self.browser:
+                 await self.browser.close()
+
+             if self.generate_gif:
+                 output_path: str = 'agent_history.gif'
+                 if isinstance(self.generate_gif, str):
+                     output_path = self.generate_gif
+
+                 self.create_history_gif(output_path=output_path)
+
+     def _create_stop_history_item(self):
+         """Create a history item for when the agent is stopped."""
+         try:
+             # Attempt to retrieve the last valid state from agent_state
+             state = None
+             if self.agent_state:
+                 last_state = self.agent_state.get_last_valid_state()
+                 if last_state:
+                     # Convert to BrowserStateHistory
+                     state = BrowserStateHistory(
+                         url=getattr(last_state, 'url', ""),
+                         title=getattr(last_state, 'title', ""),
+                         tabs=getattr(last_state, 'tabs', []),
+                         interacted_element=[None],
+                         screenshot=getattr(last_state, 'screenshot', None)
+                     )
+                 else:
+                     state = self._create_empty_state()
+             else:
+                 state = self._create_empty_state()
+
+             # Create a final item in the agent history indicating done
+             stop_history = AgentHistory(
+                 model_output=None,
+                 state=state,
+                 result=[ActionResult(extracted_content=None, error=None, is_done=True)]
+             )
+             self.history.history.append(stop_history)
+
+         except Exception as e:
+             logger.error(f"Error creating stop history item: {e}")
+             # Create empty state as fallback
+             state = self._create_empty_state()
+             stop_history = AgentHistory(
+                 model_output=None,
+                 state=state,
+                 result=[ActionResult(extracted_content=None, error=None, is_done=True)]
+             )
+             self.history.history.append(stop_history)
+
+     def _convert_to_browser_state_history(self, browser_state):
+         return BrowserStateHistory(
+             url=getattr(browser_state, 'url', ""),
+             title=getattr(browser_state, 'title', ""),
+             tabs=getattr(browser_state, 'tabs', []),
+             interacted_element=[None],
+             screenshot=getattr(browser_state, 'screenshot', None)
+         )
+
+     def _create_empty_state(self):
+         return BrowserStateHistory(
+             url="",
+             title="",
+             tabs=[],
+             interacted_element=[None],
+             screenshot=None
+         )
+
+     def create_history_gif(
+         self,
+         output_path: str = 'agent_history.gif',
+         duration: int = 3000,
+         show_goals: bool = True,
+         show_task: bool = True,
+         show_logo: bool = False,
+         font_size: int = 40,
+         title_font_size: int = 56,
+         goal_font_size: int = 44,
+         margin: int = 40,
+         line_spacing: float = 1.5,
+     ) -> None:
+         """Create a GIF from the agent's history with overlaid task and goal text."""
+         if not self.history.history:
+             logger.warning('No history to create GIF from')
+             return
+
+         images = []
+         # if history is empty or first screenshot is None, we can't create a gif
+         if not self.history.history or not self.history.history[0].state.screenshot:
+             logger.warning('No history or first screenshot to create GIF from')
+             return
+
+         # Try to load nicer fonts
+         try:
+             # Try different font options in order of preference
+             font_options = ['Helvetica', 'Arial', 'DejaVuSans', 'Verdana']
+             font_loaded = False
+
+             for font_name in font_options:
+                 try:
+                     if platform.system() == 'Windows':
+                         # Need to specify the abs font path on Windows
+                         font_name = os.path.join(os.getenv('WIN_FONT_DIR', 'C:\\Windows\\Fonts'), font_name + '.ttf')
+                     regular_font = ImageFont.truetype(font_name, font_size)
+                     title_font = ImageFont.truetype(font_name, title_font_size)
+                     goal_font = ImageFont.truetype(font_name, goal_font_size)
+                     font_loaded = True
+                     break
+                 except OSError:
+                     continue
+
+             if not font_loaded:
+                 raise OSError('No preferred fonts found')
+
+         except OSError:
+             regular_font = ImageFont.load_default()
+             title_font = ImageFont.load_default()
+
+             goal_font = regular_font
+
+         # Load logo if requested
+         logo = None
+         if show_logo:
+             try:
+                 logo = Image.open('./static/browser-use.png')
+                 # Resize logo to be small (e.g., 40px height)
+                 logo_height = 150
+                 aspect_ratio = logo.width / logo.height
+                 logo_width = int(logo_height * aspect_ratio)
+                 logo = logo.resize((logo_width, logo_height), Image.Resampling.LANCZOS)
+             except Exception as e:
+                 logger.warning(f'Could not load logo: {e}')
+
+         # Create task frame if requested
+         if show_task and self.task:
+             task_frame = self._create_task_frame(
+                 self.task,
+                 self.history.history[0].state.screenshot,
+                 title_font,
+                 regular_font,
+                 logo,
+                 line_spacing,
+             )
+             images.append(task_frame)
+
+         # Process each history item
+         for i, item in enumerate(self.history.history, 1):
+             if not item.state.screenshot:
+                 continue
+
+             # Convert base64 screenshot to PIL Image
+             img_data = base64.b64decode(item.state.screenshot)
+             image = Image.open(io.BytesIO(img_data))
+
+             if show_goals and item.model_output:
+                 image = self._add_overlay_to_image(
+                     image=image,
+                     step_number=i,
+                     goal_text=item.model_output.current_state.thought,
+                     regular_font=regular_font,
+                     title_font=title_font,
+                     margin=margin,
+                     logo=logo,
+                 )
+
+             images.append(image)
+
+         if images:
+             # Save the GIF
+             images[0].save(
+                 output_path,
+                 save_all=True,
+                 append_images=images[1:],
+                 duration=duration,
+                 loop=0,
+                 optimize=False,
+             )
+             logger.info(f'Created GIF at {output_path}')
+         else:
+             logger.warning('No images found in history to create GIF')
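For orientation, a minimal usage sketch of the class above; the model name and task are placeholders, and it assumes the package layout introduced in this commit:

```python
# Illustrative sketch, not part of the commit: run CustomAgent with a LangChain chat model.
import asyncio

from langchain_openai import ChatOpenAI

from src.agent.custom_agent import CustomAgent
from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt


async def main():
    agent = CustomAgent(
        task="Go to example.com and summarize the page",  # placeholder task
        add_infos="",                                     # optional hints for the agent
        llm=ChatOpenAI(model="gpt-4o"),                   # any LangChain chat model works here
        use_vision=True,
        system_prompt_class=CustomSystemPrompt,
        agent_prompt_class=CustomAgentMessagePrompt,
    )
    history = await agent.run(max_steps=20)  # returns an AgentHistoryList
    print(history.is_done())


asyncio.run(main())
```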
src/agent/custom_message_manager.py ADDED
@@ -0,0 +1,117 @@
+ from __future__ import annotations
+
+ import logging
+ from typing import List, Optional, Type
+
+ from browser_use.agent.message_manager.service import MessageManager
+ from browser_use.agent.message_manager.views import MessageHistory
+ from browser_use.agent.prompts import SystemPrompt, AgentMessagePrompt
+ from browser_use.agent.views import ActionResult, AgentStepInfo, ActionModel
+ from browser_use.browser.views import BrowserState
+ from langchain_core.language_models import BaseChatModel
+ from langchain_anthropic import ChatAnthropic
+ from langchain_core.language_models import BaseChatModel
+ from langchain_core.messages import (
+     AIMessage,
+     BaseMessage,
+     HumanMessage,
+     ToolMessage
+ )
+ from langchain_openai import ChatOpenAI
+ from ..utils.llm import DeepSeekR1ChatOpenAI
+ from .custom_prompts import CustomAgentMessagePrompt
+
+ logger = logging.getLogger(__name__)
+
+
+ class CustomMessageManager(MessageManager):
+     def __init__(
+         self,
+         llm: BaseChatModel,
+         task: str,
+         action_descriptions: str,
+         system_prompt_class: Type[SystemPrompt],
+         agent_prompt_class: Type[AgentMessagePrompt],
+         max_input_tokens: int = 128000,
+         estimated_characters_per_token: int = 3,
+         image_tokens: int = 800,
+         include_attributes: list[str] = [],
+         max_error_length: int = 400,
+         max_actions_per_step: int = 10,
+         message_context: Optional[str] = None
+     ):
+         super().__init__(
+             llm=llm,
+             task=task,
+             action_descriptions=action_descriptions,
+             system_prompt_class=system_prompt_class,
+             max_input_tokens=max_input_tokens,
+             estimated_characters_per_token=estimated_characters_per_token,
+             image_tokens=image_tokens,
+             include_attributes=include_attributes,
+             max_error_length=max_error_length,
+             max_actions_per_step=max_actions_per_step,
+             message_context=message_context
+         )
+         self.agent_prompt_class = agent_prompt_class
+         # Custom: Move Task info to state_message
+         self.history = MessageHistory()
+         self._add_message_with_tokens(self.system_prompt)
+
+         if self.message_context:
+             context_message = HumanMessage(content=self.message_context)
+             self._add_message_with_tokens(context_message)
+
+     def cut_messages(self):
+         """Get current message list, potentially trimmed to max tokens"""
+         diff = self.history.total_tokens - self.max_input_tokens
+         min_message_len = 2 if self.message_context is not None else 1
+
+         while diff > 0 and len(self.history.messages) > min_message_len:
+             self.history.remove_message(min_message_len)  # always remove the oldest message
+             diff = self.history.total_tokens - self.max_input_tokens
+
+     def add_state_message(
+         self,
+         state: BrowserState,
+         actions: Optional[List[ActionModel]] = None,
+         result: Optional[List[ActionResult]] = None,
+         step_info: Optional[AgentStepInfo] = None,
+     ) -> None:
+         """Add browser state as human message"""
+         # otherwise add state message and result to next message (which will not stay in memory)
+         state_message = self.agent_prompt_class(
+             state,
+             actions,
+             result,
+             include_attributes=self.include_attributes,
+             max_error_length=self.max_error_length,
+             step_info=step_info,
+         ).get_user_message()
+         self._add_message_with_tokens(state_message)
+
+     def _count_text_tokens(self, text: str) -> int:
+         if isinstance(self.llm, (ChatOpenAI, ChatAnthropic, DeepSeekR1ChatOpenAI)):
+             try:
+                 tokens = self.llm.get_num_tokens(text)
+             except Exception:
+                 tokens = (
+                     len(text) // self.estimated_characters_per_token
+                 )  # Rough estimate if no tokenizer available
+         else:
+             tokens = (
+                 len(text) // self.estimated_characters_per_token
+             )  # Rough estimate if no tokenizer available
+         return tokens
+
+     def _remove_state_message_by_index(self, remove_ind=-1) -> None:
+         """Remove last state message from history"""
+         i = len(self.history.messages) - 1
+         remove_cnt = 0
+         while i >= 0:
+             if isinstance(self.history.messages[i].message, HumanMessage):
+                 remove_cnt += 1
+                 if remove_cnt == abs(remove_ind):
+                     self.history.remove_message(i)
+                     break
+             i -= 1
src/agent/custom_prompts.py ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+import pdb
+from typing import List, Optional
+
+from browser_use.agent.prompts import SystemPrompt, AgentMessagePrompt
+from browser_use.agent.views import ActionResult, ActionModel
+from browser_use.browser.views import BrowserState
+from langchain_core.messages import HumanMessage, SystemMessage
+from datetime import datetime
+
+from .custom_views import CustomAgentStepInfo
+
+
+class CustomSystemPrompt(SystemPrompt):
+    def important_rules(self) -> str:
+        """
+        Returns the important rules for the agent.
+        """
+        text = r"""
+    1. RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:
+       {
+         "current_state": {
+           "prev_action_evaluation": "Success|Failed|Unknown - Analyze the current elements and the image to check whether the previous goals/actions were completed successfully as intended by the task. Ignore the action result; the website is the ground truth. Also mention if something unexpected happened, like new suggestions in an input field. Briefly state why/why not. Note that the result you output must be consistent with the reasoning you output afterwards. If you consider it 'Failed', reflect on this in your thought.",
+           "important_contents": "Output important contents closely related to the user's instruction on the current page. If there are any, output them; if not, output an empty string ''.",
+           "task_progress": "Task progress is a general summary of what has been completed so far. Summarize only what has actually been completed, based on the content at the current step and the history of operations. List each completed item individually, such as: 1. Input username. 2. Input password. 3. Click confirm button. Please return a string, not a list.",
+           "future_plans": "Based on the user's request and the current state, outline the remaining steps needed to complete the task. This should be a concise list of actions yet to be performed, such as: 1. Select a date. 2. Choose a specific time slot. 3. Confirm booking. Please return a string, not a list.",
+           "thought": "Think about which requirements have been completed by previous operations and which requirement needs to be completed by the next single operation. If your prev_action_evaluation is 'Failed', reflect and output your reflection here.",
+           "summary": "Please generate a brief natural language description of the operation in the next actions based on your thought."
+         },
+         "action": [
+           * actions in sequence, please refer to **Common action sequences**. Each output action MUST be formatted as: \{action_name\: action_params\}*
+         ]
+       }
+
+    2. ACTIONS: You can specify multiple actions to be executed in sequence.
+
+       Common action sequences:
+       - Form filling: [
+           {"input_text": {"index": 1, "text": "username"}},
+           {"input_text": {"index": 2, "text": "password"}},
+           {"click_element": {"index": 3}}
+         ]
+       - Navigation and extraction: [
+           {"go_to_url": {"url": "https://example.com"}},
+           {"extract_page_content": {}}
+         ]
+
+    3. ELEMENT INTERACTION:
+       - Only use indexes that exist in the provided element list
+       - Each element has a unique index number (e.g., "33[:]<button>")
+       - Elements marked with "_[:]" are non-interactive (for context only)
+
+    4. NAVIGATION & ERROR HANDLING:
+       - If no suitable elements exist, use other functions to complete the task
+       - If stuck, try alternative approaches
+       - Handle popups/cookies by accepting or closing them
+       - Use scroll to find elements you are looking for
+
+    5. TASK COMPLETION:
+       - If you think all the requirements of the user's instruction have been completed and no further operation is required, output the **Done** action to terminate the process.
+       - Don't hallucinate actions.
+       - If the task requires specific information, make sure to include everything in the done function. This is what the user will see.
+       - If you are running out of steps (current step), think about speeding up, and ALWAYS use the done action as the last action.
+       - Note that you must verify whether you've truly fulfilled the user's request by examining the actual page content, not just the actions you output; also check whether each action executed successfully. Pay particular attention when errors occur during action execution.
+
+    6. VISUAL CONTEXT:
+       - When an image is provided, use it to understand the page layout
+       - Bounding boxes with labels correspond to element indexes
+       - Each bounding box and its label have the same color
+       - Most often the label is inside the bounding box, in the top-right corner
+       - Visual context helps verify element locations and relationships
+       - Sometimes labels overlap, so use the context to verify the correct element
+
+    7. FORM FILLING:
+       - If you fill an input field and your action sequence is interrupted, most often a list of suggestions popped up under the field and you need to first select the right element from that suggestion list.
+
+    8. ACTION SEQUENCING:
+       - Actions are executed in the order they appear in the list
+       - Each action should logically follow from the previous one
+       - If the page changes after an action, the sequence is interrupted and you get the new state.
+       - If content only disappears, the sequence continues.
+       - Only provide the action sequence up to the point where you expect the page to change.
+       - Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page, like saving, extracting, checkboxes...
+       - Only use multiple actions if it makes sense.
+    """
+        text += f"   - use maximum {self.max_actions_per_step} actions per sequence"
+        return text
+
+    def input_format(self) -> str:
+        return """
+    INPUT STRUCTURE:
+    1. Task: The user's instructions you need to complete.
+    2. Hints (Optional): Some hints to help you complete the user's instructions.
+    3. Memory: Important contents recorded during historical operations for use in subsequent operations.
+    4. Current URL: The webpage you're currently on
+    5. Available Tabs: List of open browser tabs
+    6. Interactive Elements: List in the format:
+       index[:]<element_type>element_text</element_type>
+       - index: Numeric identifier for interaction
+       - element_type: HTML element type (button, input, etc.)
+       - element_text: Visible text or element description
+
+    Example:
+    33[:]<button>Submit Form</button>
+    _[:] Non-interactive text
+
+    Notes:
+    - Only elements with numeric indexes are interactive
+    - _[:] elements provide context but cannot be interacted with
+    """
+
+    def get_system_message(self) -> SystemMessage:
+        """
+        Get the system prompt for the agent.
+
+        Returns:
+            SystemMessage: Formatted system prompt
+        """
+        AGENT_PROMPT = f"""You are a precise browser automation agent that interacts with websites through structured commands. Your role is to:
+    1. Analyze the provided webpage elements and structure
+    2. Plan a sequence of actions to accomplish the given task
+    3. Your final result MUST be valid JSON in the **RESPONSE FORMAT** described, containing your action sequence and state assessment; no extra explanatory content is needed.
+
+    {self.input_format()}
+
+    {self.important_rules()}
+
+    Functions:
+    {self.default_action_description}
+
+    Remember: Your responses must be valid JSON matching the specified format. Each action in the sequence must be valid."""
+        return SystemMessage(content=AGENT_PROMPT)
+
+
+class CustomAgentMessagePrompt(AgentMessagePrompt):
+    def __init__(
+            self,
+            state: BrowserState,
+            actions: Optional[List[ActionModel]] = None,
+            result: Optional[List[ActionResult]] = None,
+            include_attributes: list[str] = [],
+            max_error_length: int = 400,
+            step_info: Optional[CustomAgentStepInfo] = None,
+    ):
+        super(CustomAgentMessagePrompt, self).__init__(state=state,
+                                                       result=result,
+                                                       include_attributes=include_attributes,
+                                                       max_error_length=max_error_length,
+                                                       step_info=step_info
+                                                       )
+        self.actions = actions
+
+    def get_user_message(self) -> HumanMessage:
+        if self.step_info:
+            step_info_description = f'Current step: {self.step_info.step_number}/{self.step_info.max_steps}\n'
+        else:
+            step_info_description = ''
+
+        time_str = datetime.now().strftime("%Y-%m-%d %H:%M")
+        step_info_description += f"Current date and time: {time_str}"
+
+        elements_text = self.state.element_tree.clickable_elements_to_string(include_attributes=self.include_attributes)
+
+        has_content_above = (self.state.pixels_above or 0) > 0
+        has_content_below = (self.state.pixels_below or 0) > 0
+
+        if elements_text != '':
+            if has_content_above:
+                elements_text = (
+                    f'... {self.state.pixels_above} pixels above - scroll or extract content to see more ...\n{elements_text}'
+                )
+            else:
+                elements_text = f'[Start of page]\n{elements_text}'
+            if has_content_below:
+                elements_text = (
+                    f'{elements_text}\n... {self.state.pixels_below} pixels below - scroll or extract content to see more ...'
+                )
+            else:
+                elements_text = f'{elements_text}\n[End of page]'
+        else:
+            elements_text = 'empty page'
+
+        state_description = f"""
+{step_info_description}
+1. Task: {self.step_info.task}.
+2. Hints(Optional):
+{self.step_info.add_infos}
+3. Memory:
+{self.step_info.memory}
+4. Current url: {self.state.url}
+5. Available tabs:
+{self.state.tabs}
+6. Interactive elements:
+{elements_text}
+"""
+
+        if self.actions and self.result:
+            state_description += "\n **Previous Actions** \n"
+            state_description += f'Previous step: {self.step_info.step_number-1}/{self.step_info.max_steps} \n'
+            for i, result in enumerate(self.result):
+                action = self.actions[i]
+                state_description += f"Previous action {i + 1}/{len(self.result)}: {action.model_dump_json(exclude_unset=True)}\n"
+                if result.include_in_memory:
+                    if result.extracted_content:
+                        state_description += f"Result of previous action {i + 1}/{len(self.result)}: {result.extracted_content}\n"
+                    if result.error:
+                        # only use the last max_error_length characters of the error
+                        error = result.error[-self.max_error_length:]
+                        state_description += (
+                            f"Error of previous action {i + 1}/{len(self.result)}: ...{error}\n"
+                        )
+
+        if self.state.screenshot:
+            # Format message for vision model
+            return HumanMessage(
+                content=[
+                    {"type": "text", "text": state_description},
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": f"data:image/png;base64,{self.state.screenshot}"
+                        },
+                    },
+                ]
+            )
+
+        return HumanMessage(content=state_description)
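For orientation, here is a minimal sketch of rendering the system prompt on its own, outside the agent loop. It assumes the package layout in this diff (`src.agent.custom_prompts`) and a `SystemPrompt` base-class constructor taking `action_description`, `current_date`, and `max_actions_per_step`; check `browser_use.agent.prompts` for the exact signature in your installed version.

```python
# Illustrative only; the constructor argument names are assumptions.
from datetime import datetime
from src.agent.custom_prompts import CustomSystemPrompt

prompt = CustomSystemPrompt(
    action_description="click_element: ...\ninput_text: ...",  # normally supplied by the controller registry
    current_date=datetime.now(),
    max_actions_per_step=10,
)
# Full instruction text sent to the LLM as the system message
print(prompt.get_system_message().content)
```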
src/agent/custom_views.py ADDED
@@ -0,0 +1,55 @@
+from dataclasses import dataclass
+from typing import Type
+
+from browser_use.agent.views import AgentOutput
+from browser_use.controller.registry.views import ActionModel
+from pydantic import BaseModel, ConfigDict, Field, create_model
+
+
+@dataclass
+class CustomAgentStepInfo:
+    step_number: int
+    max_steps: int
+    task: str
+    add_infos: str
+    memory: str
+    task_progress: str
+    future_plans: str
+
+
+class CustomAgentBrain(BaseModel):
+    """Current state of the agent"""
+
+    prev_action_evaluation: str
+    important_contents: str
+    task_progress: str
+    future_plans: str
+    thought: str
+    summary: str
+
+
+class CustomAgentOutput(AgentOutput):
+    """Output model for the agent
+
+    @dev note: this model is extended with custom actions in AgentService. You can also use fields that are not in this model, as provided by the linter, as long as they are registered in the DynamicActions model.
+    """
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    current_state: CustomAgentBrain
+    action: list[ActionModel]
+
+    @staticmethod
+    def type_with_custom_actions(
+            custom_actions: Type[ActionModel],
+    ) -> Type["CustomAgentOutput"]:
+        """Extend actions with custom actions"""
+        return create_model(
+            "CustomAgentOutput",
+            __base__=CustomAgentOutput,
+            action=(
+                list[custom_actions],
+                Field(...),
+            ),  # Properly annotated field with no default
+            __module__=CustomAgentOutput.__module__,
+        )
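The `CustomAgentBrain` fields mirror the `current_state` keys of the RESPONSE FORMAT in `custom_prompts.py`, so a model reply can be validated directly. A small sketch, assuming only the modules added in this commit:

```python
# Illustrative only: validate one "current_state" payload against the schema above.
from src.agent.custom_views import CustomAgentBrain

reply = {
    "prev_action_evaluation": "Unknown - first step, nothing to evaluate.",
    "important_contents": "",
    "task_progress": "",
    "future_plans": "1. Open the target page. 2. Extract its content.",
    "thought": "Start by navigating to the page.",
    "summary": "Navigate to example.com.",
}
brain = CustomAgentBrain(**reply)  # raises pydantic.ValidationError if a key is missing
print(brain.summary)
```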
src/browser/__init__.py ADDED
File without changes
src/browser/custom_browser.py ADDED
@@ -0,0 +1,81 @@
+import asyncio
+import pdb
+
+from playwright.async_api import Browser as PlaywrightBrowser
+from playwright.async_api import (
+    BrowserContext as PlaywrightBrowserContext,
+)
+from playwright.async_api import (
+    Playwright,
+    async_playwright,
+)
+from browser_use.browser.browser import Browser
+from browser_use.browser.context import BrowserContext, BrowserContextConfig
+from playwright.async_api import BrowserContext as PlaywrightBrowserContext
+import logging
+
+from .custom_context import CustomBrowserContext
+
+logger = logging.getLogger(__name__)
+
+class CustomBrowser(Browser):
+
+    async def new_context(
+            self,
+            config: BrowserContextConfig = BrowserContextConfig()
+    ) -> CustomBrowserContext:
+        return CustomBrowserContext(config=config, browser=self)
+
+    async def _setup_browser_with_instance(self, playwright: Playwright) -> PlaywrightBrowser:
+        """Sets up and returns a Playwright Browser instance with anti-detection measures."""
+        if not self.config.chrome_instance_path:
+            raise ValueError('Chrome instance path is required')
+        import subprocess
+
+        import requests
+
+        try:
+            # Check if a browser is already running
+            response = requests.get('http://localhost:9222/json/version', timeout=2)
+            if response.status_code == 200:
+                logger.info('Reusing existing Chrome instance')
+                browser = await playwright.chromium.connect_over_cdp(
+                    endpoint_url='http://localhost:9222',
+                    timeout=20000,  # 20 second timeout for connection
+                )
+                return browser
+        except requests.ConnectionError:
+            logger.debug('No existing Chrome instance found, starting a new one')
+
+        # Start a new Chrome instance
+        subprocess.Popen(
+            [
+                self.config.chrome_instance_path,
+                '--remote-debugging-port=9222',
+            ] + self.config.extra_chromium_args,
+            stdout=subprocess.DEVNULL,
+            stderr=subprocess.DEVNULL,
+        )
+
+        # Poll the debugging endpoint in case the browser has not started yet
+        for _ in range(10):
+            try:
+                response = requests.get('http://localhost:9222/json/version', timeout=2)
+                if response.status_code == 200:
+                    break
+            except requests.ConnectionError:
+                pass
+            await asyncio.sleep(1)
+
+        # Attempt to connect again after starting a new instance
+        try:
+            browser = await playwright.chromium.connect_over_cdp(
+                endpoint_url='http://localhost:9222',
+                timeout=20000,  # 20 second timeout for connection
+            )
+            return browser
+        except Exception as e:
+            logger.error(f'Failed to start a new Chrome instance: {str(e)}')
+            raise RuntimeError(
+                'To start Chrome in debug mode, you need to close all existing Chrome instances and try again; otherwise we cannot connect to the instance.'
+            )
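A small usage sketch for the class above. The `BrowserConfig` fields are the same ones `deep_research.py` (later in this commit) passes in; the Chrome binary path is a hypothetical placeholder, the web UI reads it from `CHROME_PATH` instead.

```python
# Sketch: connect to (or launch) a local Chrome over CDP via CustomBrowser.
import asyncio
from browser_use.browser.browser import BrowserConfig
from src.browser.custom_browser import CustomBrowser

async def main():
    browser = CustomBrowser(config=BrowserConfig(
        headless=False,
        chrome_instance_path="/usr/bin/google-chrome",  # hypothetical path
    ))
    context = await browser.new_context()
    # ... run an agent against `context` here ...
    await context.close()
    await browser.close()

asyncio.run(main())
```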
src/browser/custom_context.py ADDED
@@ -0,0 +1,19 @@
+import json
+import logging
+import os
+
+from browser_use.browser.browser import Browser
+from browser_use.browser.context import BrowserContext, BrowserContextConfig
+from playwright.async_api import Browser as PlaywrightBrowser
+from playwright.async_api import BrowserContext as PlaywrightBrowserContext
+
+logger = logging.getLogger(__name__)
+
+
+class CustomBrowserContext(BrowserContext):
+    def __init__(
+            self,
+            browser: "Browser",
+            config: BrowserContextConfig = BrowserContextConfig()
+    ):
+        super(CustomBrowserContext, self).__init__(browser=browser, config=config)
src/controller/__init__.py ADDED
File without changes
src/controller/custom_controller.py ADDED
@@ -0,0 +1,71 @@
+import pdb
+
+import pyperclip
+from typing import Optional, Type
+from pydantic import BaseModel
+from browser_use.agent.views import ActionResult
+from browser_use.browser.context import BrowserContext
+from browser_use.controller.service import Controller, DoneAction
+from main_content_extractor import MainContentExtractor
+from browser_use.controller.views import (
+    ClickElementAction,
+    DoneAction,
+    ExtractPageContentAction,
+    GoToUrlAction,
+    InputTextAction,
+    OpenTabAction,
+    ScrollAction,
+    SearchGoogleAction,
+    SendKeysAction,
+    SwitchTabAction,
+)
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class CustomController(Controller):
+    def __init__(self, exclude_actions: list[str] = [],
+                 output_model: Optional[Type[BaseModel]] = None
+                 ):
+        super().__init__(exclude_actions=exclude_actions, output_model=output_model)
+        self._register_custom_actions()
+
+    def _register_custom_actions(self):
+        """Register all custom browser actions"""
+
+        @self.registry.action("Copy text to clipboard")
+        def copy_to_clipboard(text: str):
+            pyperclip.copy(text)
+            return ActionResult(extracted_content=text)
+
+        @self.registry.action("Paste text from clipboard", requires_browser=True)
+        async def paste_from_clipboard(browser: BrowserContext):
+            text = pyperclip.paste()
+            # type the clipboard text into the current page
+            page = await browser.get_current_page()
+            await page.keyboard.type(text)
+
+            return ActionResult(extracted_content=text)
+
+        @self.registry.action(
+            'Extract page content to get the pure text or markdown with links if include_links is set to true',
+            param_model=ExtractPageContentAction,
+            requires_browser=True,
+        )
+        async def extract_content(params: ExtractPageContentAction, browser: BrowserContext):
+            page = await browser.get_current_page()
+            # use the Jina reader to fetch a clean rendering of the page
+            url = page.url
+            jina_url = f"https://r.jina.ai/{url}"
+            await page.goto(jina_url)
+            output_format = 'markdown' if params.include_links else 'text'
+            content = MainContentExtractor.extract(  # type: ignore
+                html=await page.content(),
+                output_format=output_format,
+            )
+            # go back to the original url
+            await page.go_back()
+            msg = f'Extracted page content:\n {content}\n'
+            logger.info(msg)
+            return ActionResult(extracted_content=msg)
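Further actions can be registered from outside the class with the same registry pattern the methods above use. A sketch, with a made-up action name for illustration:

```python
# Illustrative only: extend the controller with one more custom action.
from browser_use.agent.views import ActionResult
from src.controller.custom_controller import CustomController

controller = CustomController()

@controller.registry.action("Log a note for the user")
def log_note(text: str):
    # Synchronous actions without requires_browser=True receive only their params.
    print(f"NOTE: {text}")
    return ActionResult(extracted_content=text)
```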
src/utils/__init__.py ADDED
File without changes
src/utils/agent_state.py ADDED
@@ -0,0 +1,30 @@
+import asyncio
+
+class AgentState:
+    _instance = None
+
+    def __init__(self):
+        if not hasattr(self, '_stop_requested'):
+            self._stop_requested = asyncio.Event()
+            self.last_valid_state = None  # store the last valid browser state
+
+    def __new__(cls):
+        if cls._instance is None:
+            cls._instance = super(AgentState, cls).__new__(cls)
+        return cls._instance
+
+    def request_stop(self):
+        self._stop_requested.set()
+
+    def clear_stop(self):
+        self._stop_requested.clear()
+        self.last_valid_state = None
+
+    def is_stop_requested(self):
+        return self._stop_requested.is_set()
+
+    def set_last_valid_state(self, state):
+        self.last_valid_state = state
+
+    def get_last_valid_state(self):
+        return self.last_valid_state
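Because `AgentState` overrides `__new__`, every call site shares one instance, so a stop requested from the UI is visible to the running agent. A minimal demonstration using only the class above:

```python
from src.utils.agent_state import AgentState

ui_handle = AgentState()
agent_handle = AgentState()
assert ui_handle is agent_handle           # singleton: same instance everywhere

ui_handle.request_stop()                   # e.g. triggered by a "Stop" button
print(agent_handle.is_stop_requested())    # True - the agent loop can exit early
agent_handle.clear_stop()                  # reset before the next run
```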
src/utils/deep_research.py ADDED
@@ -0,0 +1,351 @@
+import pdb
+
+from dotenv import load_dotenv
+
+load_dotenv()
+import asyncio
+import os
+import sys
+import logging
+from pprint import pprint
+from uuid import uuid4
+from src.utils import utils
+from src.agent.custom_agent import CustomAgent
+import json
+import re
+from browser_use.agent.service import Agent
+from browser_use.browser.browser import BrowserConfig, Browser
+from langchain.schema import SystemMessage, HumanMessage
+from json_repair import repair_json
+from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
+from src.controller.custom_controller import CustomController
+from src.browser.custom_browser import CustomBrowser
+from src.browser.custom_context import BrowserContextConfig
+from browser_use.browser.context import (
+    BrowserContextConfig,
+    BrowserContextWindowSize,
+)
+
+logger = logging.getLogger(__name__)
+
+
+async def deep_research(task, llm, agent_state=None, **kwargs):
+    task_id = str(uuid4())
+    save_dir = kwargs.get("save_dir", os.path.join(f"./tmp/deep_research/{task_id}"))
+    logger.info(f"Save Deep Research at: {save_dir}")
+    os.makedirs(save_dir, exist_ok=True)
+
+    # max query num per iteration
+    max_query_num = kwargs.get("max_query_num", 3)
+
+    use_own_browser = kwargs.get("use_own_browser", False)
+    extra_chromium_args = []
+    if use_own_browser:
+        # TODO: when using your own browser, max query num must be 1 per iteration; how to lift this limit?
+        max_query_num = 1
+        chrome_path = os.getenv("CHROME_PATH", None)
+        if chrome_path == "":
+            chrome_path = None
+        chrome_user_data = os.getenv("CHROME_USER_DATA", None)
+        if chrome_user_data:
+            extra_chromium_args += [f"--user-data-dir={chrome_user_data}"]
+
+        browser = CustomBrowser(
+            config=BrowserConfig(
+                headless=kwargs.get("headless", False),
+                disable_security=kwargs.get("disable_security", True),
+                chrome_instance_path=chrome_path,
+                extra_chromium_args=extra_chromium_args,
+            )
+        )
+        browser_context = await browser.new_context()
+    else:
+        browser = None
+        browser_context = None
+
+    controller = CustomController()
+
+    search_system_prompt = f"""
+    You are a **Deep Researcher**, an AI agent specializing in in-depth information gathering and research using a web browser with **automated execution capabilities**. Your expertise lies in formulating comprehensive research plans and executing them meticulously to fulfill complex user requests. You will analyze user instructions, devise a detailed research plan, and determine the necessary search queries to gather the required information.
+
+    **Your Task:**
+
+    Given a user's research topic, you will:
+
+    1. **Develop a Research Plan:** Outline the key aspects and subtopics that need to be investigated to thoroughly address the user's request. This plan should be a high-level overview of the research direction.
+    2. **Generate Search Queries:** Based on your research plan, generate a list of specific search queries to be executed in a web browser. These queries should be designed to efficiently gather relevant information for each aspect of your plan.
+
+    **Output Format:**
+
+    Your output will be a JSON object with the following structure:
+
+    ```json
+    {{
+      "plan": "A concise, high-level research plan outlining the key areas to investigate.",
+      "queries": [
+        "search query 1",
+        "search query 2",
+        //... up to a maximum of {max_query_num} search queries
+      ]
+    }}
+    ```
+
+    **Important:**
+
+    * Limit your output to a **maximum of {max_query_num}** search queries.
+    * Design the search queries to help the automated agent find the needed information. Consider what keywords are most likely to lead to useful results.
+    * If you have gathered all the information you want and no further search queries are required, output an empty queries list: `[]`
+    * Make sure the output search queries are different from the history queries.
+
+    **Inputs:**
+
+    1. **User Instruction:** The original instruction given by the user.
+    2. **Previous Queries:** History queries.
+    3. **Previous Search Results:** Textual data gathered from prior search queries. If there are no previous search results this string will be empty.
+    """
+    search_messages = [SystemMessage(content=search_system_prompt)]
+
+    record_system_prompt = """
+    You are an expert information recorder. Your role is to process user instructions, current search results, and previously recorded information to extract, summarize, and record new, useful information that helps fulfill the user's request. Your output will be a JSON formatted list, where each element represents a piece of extracted information and follows the structure: `{"url": "source_url", "title": "source_title", "summary_content": "concise_summary", "thinking": "reasoning"}`.
+
+    **Important Considerations:**
+
+    1. **Minimize Information Loss:** While concise, prioritize retaining important details and nuances from the sources. Aim for a summary that captures the essence of the information without over-simplification. **Crucially, ensure to preserve key data and figures within the `summary_content`. This is essential for later stages, such as generating tables and reports.**
+
+    2. **Avoid Redundancy:** Do not record information that is already present in the Previous Recorded Information. Check for semantic similarity, not just exact matches. However, if the same information is expressed differently in a new source and this variation adds valuable context or clarity, it should be included.
+
+    3. **Source Information:** Extract and include the source title and URL for each piece of information summarized. This is crucial for verification and context. **The Current Search Results are provided in a specific format, where each item starts with "Title:", followed by the title, then "URL Source:", followed by the URL, and finally "Markdown Content:", followed by the content. Please extract the title and URL from this structure.** If a piece of information cannot be attributed to a specific source from the provided search results, use `"url": "unknown"` and `"title": "unknown"`.
+
+    4. **Thinking and Report Structure:** For each extracted piece of information, add a `"thinking"` key. This field should contain your assessment of how this information could be used in a report, which section it might belong to (e.g., introduction, background, analysis, conclusion, specific subtopics), and any other relevant thoughts about its significance or connection to other information.
+
+    **Output Format:**
+
+    Provide your output as a JSON formatted list. Each item in the list must adhere to the following format:
+
+    ```json
+    [
+      {
+        "url": "source_url_1",
+        "title": "source_title_1",
+        "summary_content": "Concise summary of content. Remember to include key data and figures here.",
+        "thinking": "This could be used in the introduction to set the context. It also relates to the section on the history of the topic."
+      },
+      // ... more entries
+      {
+        "url": "unknown",
+        "title": "unknown",
+        "summary_content": "concise_summary_of_content_without_clear_source",
+        "thinking": "This might be useful background information, but I need to verify its accuracy. Could be used in the methodology section to explain how data was collected."
+      }
+    ]
+    ```
+
+    **Inputs:**
+
+    1. **User Instruction:** The original instruction given by the user. This helps you determine what kind of information will be useful and how to structure your thinking.
+    2. **Previous Recorded Information:** Textual data gathered and recorded from previous searches and processing, represented as a single text string.
+    3. **Current Search Plan:** Research plan for the current search.
+    4. **Current Search Query:** The current search query.
+    5. **Current Search Results:** Textual data gathered from the most recent search query.
+    """
+    record_messages = [SystemMessage(content=record_system_prompt)]
+
+    search_iteration = 0
+    max_search_iterations = kwargs.get("max_search_iterations", 10)  # limit search iterations to prevent infinite loops
+    use_vision = kwargs.get("use_vision", False)
+
+    history_query = []
+    history_infos = []
+    try:
+        while search_iteration < max_search_iterations:
+            search_iteration += 1
+            logger.info(f"Starting search iteration {search_iteration}...")
+            history_query_ = json.dumps(history_query, indent=4)
+            history_infos_ = json.dumps(history_infos, indent=4)
+            query_prompt = f"This is search {search_iteration} of {max_search_iterations} maximum searches allowed.\n User Instruction:{task} \n Previous Queries:\n {history_query_} \n Previous Search Results:\n {history_infos_}\n"
+            search_messages.append(HumanMessage(content=query_prompt))
+            ai_query_msg = llm.invoke(search_messages[:1] + search_messages[1:][-1:])
+            search_messages.append(ai_query_msg)
+            if hasattr(ai_query_msg, "reasoning_content"):
+                logger.info("🤯 Start Search Deep Thinking: ")
+                logger.info(ai_query_msg.reasoning_content)
+                logger.info("🤯 End Search Deep Thinking")
+            ai_query_content = ai_query_msg.content.replace("```json", "").replace("```", "")
+            ai_query_content = repair_json(ai_query_content)
+            ai_query_content = json.loads(ai_query_content)
+            query_plan = ai_query_content["plan"]
+            logger.info(f"Current Iteration {search_iteration} Planning:")
+            logger.info(query_plan)
+            query_tasks = ai_query_content["queries"]
+            if not query_tasks:
+                break
+            else:
+                query_tasks = query_tasks[:max_query_num]
+                history_query.extend(query_tasks)
+                logger.info("Query tasks:")
+                logger.info(query_tasks)
+
+            # 2. Perform web searches and execute them automatically
+            # Parallel browser-use agents
+            add_infos = "1. Please click on the most relevant link to get information and go deeper, instead of just staying on the search page. \n" \
+                        "2. When opening a PDF file, please remember to extract the content using extract_content instead of simply opening it for the user to view.\n"
+            if use_own_browser:
+                agent = CustomAgent(
+                    task=query_tasks[0],
+                    llm=llm,
+                    add_infos=add_infos,
+                    browser=browser,
+                    browser_context=browser_context,
+                    use_vision=use_vision,
+                    system_prompt_class=CustomSystemPrompt,
+                    agent_prompt_class=CustomAgentMessagePrompt,
+                    max_actions_per_step=5,
+                    controller=controller,
+                    agent_state=agent_state
+                )
+                agent_result = await agent.run(max_steps=kwargs.get("max_steps", 10))
+                query_results = [agent_result]
+                # Manually close all tabs
+                session = await browser_context.get_session()
+                pages = session.context.pages
+                await browser_context.create_new_tab()
+                for page_id, page in enumerate(pages):
+                    await page.close()
+
+            else:
+                # name the loop variable query_task to avoid shadowing the user's task
+                agents = [CustomAgent(
+                    task=query_task,
+                    llm=llm,
+                    add_infos=add_infos,
+                    browser=browser,
+                    browser_context=browser_context,
+                    use_vision=use_vision,
+                    system_prompt_class=CustomSystemPrompt,
+                    agent_prompt_class=CustomAgentMessagePrompt,
+                    max_actions_per_step=5,
+                    controller=controller,
+                    agent_state=agent_state
+                ) for query_task in query_tasks]
+                query_results = await asyncio.gather(
+                    *[agent.run(max_steps=kwargs.get("max_steps", 10)) for agent in agents])
+
+            if agent_state and agent_state.is_stop_requested():
+                # Stop
+                break
+            # 3. Summarize search results
+            query_result_dir = os.path.join(save_dir, "query_results")
+            os.makedirs(query_result_dir, exist_ok=True)
+            for i in range(len(query_tasks)):
+                query_result = query_results[i].final_result()
+                if not query_result:
+                    continue
+                query_save_path = os.path.join(query_result_dir, f"{search_iteration}-{i}.md")
+                logger.info(f"save query: {query_tasks[i]} at {query_save_path}")
+                with open(query_save_path, "w", encoding="utf-8") as fw:
+                    fw.write(f"Query: {query_tasks[i]}\n")
+                    fw.write(query_result)
+                # split the query result in case the content is too long
+                query_results_split = query_result.split("Extracted page content:")
+                for qi, query_result_ in enumerate(query_results_split):
+                    if not query_result_:
+                        continue
+                    else:
+                        # TODO: limit content length: 128k tokens, ~3 chars per token
+                        query_result_ = query_result_[:128000 * 3]
+                    history_infos_ = json.dumps(history_infos, indent=4)
+                    record_prompt = f"User Instruction:{task}. \nPrevious Recorded Information:\n {history_infos_}\n Current Search Iteration: {search_iteration}\n Current Search Plan:\n{query_plan}\n Current Search Query:\n {query_tasks[i]}\n Current Search Results: {query_result_}\n "
+                    record_messages.append(HumanMessage(content=record_prompt))
+                    ai_record_msg = llm.invoke(record_messages[:1] + record_messages[-1:])
+                    record_messages.append(ai_record_msg)
+                    if hasattr(ai_record_msg, "reasoning_content"):
+                        logger.info("🤯 Start Record Deep Thinking: ")
+                        logger.info(ai_record_msg.reasoning_content)
+                        logger.info("🤯 End Record Deep Thinking")
+                    record_content = ai_record_msg.content
+                    record_content = repair_json(record_content)
+                    new_record_infos = json.loads(record_content)
+                    history_infos.extend(new_record_infos)
+
+        logger.info("\nFinish Searching, Start Generating Report...")
+
+        # 4. Report generation in Markdown (or JSON if you prefer)
+        return await generate_final_report(task, history_infos, save_dir, llm)
+
+    except Exception as e:
+        logger.error(f"Deep research Error: {e}")
+        return await generate_final_report(task, history_infos, save_dir, llm, str(e))
+    finally:
+        # close the context before the browser that owns it
+        if browser_context:
+            await browser_context.close()
+        if browser:
+            await browser.close()
+        logger.info("Browser closed.")
+
+
+async def generate_final_report(task, history_infos, save_dir, llm, error_msg=None):
+    """Generate a report from the collected information, with error handling"""
+    try:
+        logger.info("\nAttempting to generate final report from collected data...")
+
+        writer_system_prompt = """
+        You are a **Deep Researcher** and a professional report writer tasked with creating polished, high-quality reports that fully meet the user's needs, based on the user's instructions and the relevant information provided. You will write the report using Markdown format, ensuring it is both informative and visually appealing.
+
+        **Specific Instructions:**
+
+        * **Structure for Impact:** The report must have a clear, logical, and impactful structure. Begin with a compelling introduction that immediately grabs the reader's attention. Develop well-structured body paragraphs that flow smoothly and logically, and conclude with a concise and memorable conclusion that summarizes key takeaways and leaves a lasting impression.
+        * **Engaging and Vivid Language:** Employ precise, vivid, and descriptive language to make the report captivating and enjoyable to read. Use stylistic techniques to enhance engagement. Tailor your tone, vocabulary, and writing style to perfectly suit the subject matter and the intended audience to maximize impact and readability.
+        * **Accuracy, Credibility, and Citations:** Ensure that all information presented is meticulously accurate, rigorously truthful, and robustly supported by the available data. **Cite sources exclusively using bracketed sequential numbers within the text (e.g., [1], [2], etc.). If no references are used, omit citations entirely.** These numbers must correspond to a numbered list of references at the end of the report.
+        * **Publication-Ready Formatting:** Adhere strictly to Markdown formatting for excellent readability and a clean, highly professional visual appearance. Pay close attention to formatting details like headings, lists, emphasis, and spacing to optimize the visual presentation and reader experience. The report should be ready for immediate publication upon completion, requiring minimal to no further editing for style or format.
+        * **Conciseness and Clarity (Unless Specified Otherwise):** When the user does not provide a specific length, prioritize concise and to-the-point writing, maximizing information density while maintaining clarity.
+        * **Data-Driven Comparisons with Tables:** **When appropriate and beneficial for enhancing clarity and impact, present data comparisons in well-structured Markdown tables. This is especially encouraged when dealing with numerical data or when a visual comparison can significantly improve the reader's understanding.**
+        * **Length Adherence:** When the user specifies a length constraint, meticulously stay within reasonable bounds of that specification, ensuring the content is appropriately scaled without sacrificing quality or completeness.
+        * **Comprehensive Instruction Following:** Pay meticulous attention to all details and nuances provided in the user instructions. Strive to fulfill every aspect of the user's request with the highest degree of accuracy and attention to detail, creating a report that not only meets but exceeds expectations for quality and professionalism.
+        * **Reference List Formatting:** The reference list at the end must be formatted as follows:
+          `[1] Title (URL, if available)`
+          **Each reference must be separated by a blank line to ensure proper spacing.** For example:
+
+          ```
+          [1] Title 1 (URL1, if available)
+
+          [2] Title 2 (URL2, if available)
+          ```
+          **Furthermore, ensure that the reference list is free of duplicates. Each unique source should be listed only once, regardless of how many times it is cited in the text.**
+        * **ABSOLUTE FINAL OUTPUT RESTRICTION:** **Your output must contain ONLY the finished, publication-ready Markdown report. Do not include ANY extraneous text, phrases, preambles, meta-commentary, or markdown code indicators (e.g., "```markdown```"). The report should begin directly with the title and introductory paragraph, and end directly after the conclusion and the reference list (if applicable).** **Your response will be deemed a failure if this instruction is not followed precisely.**
+
+        **Inputs:**
+
+        1. **User Instruction:** The original instruction given by the user. This helps you determine what kind of information will be useful and how to structure your thinking.
+        2. **Search Information:** Information gathered from the search queries.
+        """
+
+        history_infos_ = json.dumps(history_infos, indent=4)
+        record_json_path = os.path.join(save_dir, "record_infos.json")
+        logger.info(f"save all recorded information at {record_json_path}")
+        with open(record_json_path, "w") as fw:
+            json.dump(history_infos, fw, indent=4)
+        report_prompt = f"User Instruction:{task} \n Search Information:\n {history_infos_}"
+        report_messages = [SystemMessage(content=writer_system_prompt),
+                           HumanMessage(content=report_prompt)]  # new context for report generation
+        ai_report_msg = llm.invoke(report_messages)
+        if hasattr(ai_report_msg, "reasoning_content"):
+            logger.info("🤯 Start Report Deep Thinking: ")
+            logger.info(ai_report_msg.reasoning_content)
+            logger.info("🤯 End Report Deep Thinking")
+        report_content = ai_report_msg.content
+        report_content = re.sub(r"^```\s*markdown\s*|^\s*```|```\s*$", "", report_content, flags=re.MULTILINE)
+        report_content = report_content.strip()
+
+        # Prepend an error notification to the report
+        if error_msg:
+            report_content = f"## ⚠️ Research Incomplete - Partial Results\n" \
+                             f"**The research process was interrupted by an error:** {error_msg}\n\n" \
+                             f"{report_content}"
+
+        report_file_path = os.path.join(save_dir, "final_report.md")
+        with open(report_file_path, "w", encoding="utf-8") as f:
+            f.write(report_content)
+        logger.info(f"Save Report at: {report_file_path}")
+        return report_content, report_file_path
+
+    except Exception as report_error:
+        logger.error(f"Failed to generate partial report: {report_error}")
+        return f"Error generating report: {str(report_error)}", None
src/utils/default_config_settings.py ADDED
@@ -0,0 +1,122 @@
+import os
+import pickle
+import uuid
+import gradio as gr
+
+
+def default_config():
+    """Prepare the default configuration"""
+    return {
+        "agent_type": "custom",
+        "max_steps": 100,
+        "max_actions_per_step": 10,
+        "use_vision": True,
+        "tool_calling_method": "auto",
+        "llm_provider": "openai",
+        "llm_model_name": "gpt-4o",
+        "llm_temperature": 1.0,
+        "llm_base_url": "",
+        "llm_api_key": "",
+        "use_own_browser": os.getenv("CHROME_PERSISTENT_SESSION", "false").lower() == "true",
+        "keep_browser_open": False,
+        "headless": False,
+        "disable_security": True,
+        "enable_recording": True,
+        "window_w": 1280,
+        "window_h": 1100,
+        "save_recording_path": "./tmp/record_videos",
+        "save_trace_path": "./tmp/traces",
+        "save_agent_history_path": "./tmp/agent_history",
+        "task": "go to google.com and type 'OpenAI' click search and give me the first url",
+    }
+
+
+def load_config_from_file(config_file):
+    """Load settings from a UUID-named .pkl file."""
+    try:
+        with open(config_file, 'rb') as f:
+            settings = pickle.load(f)
+        return settings
+    except Exception as e:
+        return f"Error loading configuration: {str(e)}"
+
+
+def save_config_to_file(settings, save_dir="./tmp/webui_settings"):
+    """Save the current settings to a .pkl file named with a fresh UUID."""
+    os.makedirs(save_dir, exist_ok=True)
+    config_file = os.path.join(save_dir, f"{uuid.uuid4()}.pkl")
+    with open(config_file, 'wb') as f:
+        pickle.dump(settings, f)
+    return f"Configuration saved to {config_file}"
+
+
+def save_current_config(*args):
+    current_config = {
+        "agent_type": args[0],
+        "max_steps": args[1],
+        "max_actions_per_step": args[2],
+        "use_vision": args[3],
+        "tool_calling_method": args[4],
+        "llm_provider": args[5],
+        "llm_model_name": args[6],
+        "llm_temperature": args[7],
+        "llm_base_url": args[8],
+        "llm_api_key": args[9],
+        "use_own_browser": args[10],
+        "keep_browser_open": args[11],
+        "headless": args[12],
+        "disable_security": args[13],
+        "enable_recording": args[14],
+        "window_w": args[15],
+        "window_h": args[16],
+        "save_recording_path": args[17],
+        "save_trace_path": args[18],
+        "save_agent_history_path": args[19],
+        "task": args[20],
+    }
+    return save_config_to_file(current_config)
+
+
+def update_ui_from_config(config_file):
+    if config_file is not None:
+        loaded_config = load_config_from_file(config_file.name)
+        if isinstance(loaded_config, dict):
+            return (
+                gr.update(value=loaded_config.get("agent_type", "custom")),
+                gr.update(value=loaded_config.get("max_steps", 100)),
+                gr.update(value=loaded_config.get("max_actions_per_step", 10)),
+                gr.update(value=loaded_config.get("use_vision", True)),
+                gr.update(value=loaded_config.get("tool_calling_method", "auto")),
+                gr.update(value=loaded_config.get("llm_provider", "openai")),
+                gr.update(value=loaded_config.get("llm_model_name", "gpt-4o")),
+                gr.update(value=loaded_config.get("llm_temperature", 1.0)),
+                gr.update(value=loaded_config.get("llm_base_url", "")),
+                gr.update(value=loaded_config.get("llm_api_key", "")),
+                gr.update(value=loaded_config.get("use_own_browser", False)),
+                gr.update(value=loaded_config.get("keep_browser_open", False)),
+                gr.update(value=loaded_config.get("headless", False)),
+                gr.update(value=loaded_config.get("disable_security", True)),
+                gr.update(value=loaded_config.get("enable_recording", True)),
+                gr.update(value=loaded_config.get("window_w", 1280)),
+                gr.update(value=loaded_config.get("window_h", 1100)),
+                gr.update(value=loaded_config.get("save_recording_path", "./tmp/record_videos")),
+                gr.update(value=loaded_config.get("save_trace_path", "./tmp/traces")),
+                gr.update(value=loaded_config.get("save_agent_history_path", "./tmp/agent_history")),
+                gr.update(value=loaded_config.get("task", "")),
+                "Configuration loaded successfully."
+            )
+        else:
+            return (
+                gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
+                gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
+                gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
+                gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
+                gr.update(), "Error: Invalid configuration file."
+            )
+    return (
+        gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
+        gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
+        gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
+        gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
+        gr.update(), "No file selected."
+    )
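A round-trip sketch for the pickle-based persistence above; the path parsing relies on `save_config_to_file` returning "Configuration saved to &lt;path&gt;" as defined in this file.

```python
from src.utils.default_config_settings import (
    default_config, save_config_to_file, load_config_from_file,
)

cfg = default_config()
cfg["max_steps"] = 50
status = save_config_to_file(cfg)         # "Configuration saved to ./tmp/webui_settings/<uuid>.pkl"
saved_path = status.rsplit(" ", 1)[-1]    # recover the file path from the status string
restored = load_config_from_file(saved_path)
assert restored["max_steps"] == 50
```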
src/utils/llm.py ADDED
@@ -0,0 +1,136 @@
+from openai import OpenAI
+import pdb
+from langchain_openai import ChatOpenAI
+from langchain_core.globals import get_llm_cache
+from langchain_core.language_models.base import (
+    BaseLanguageModel,
+    LangSmithParams,
+    LanguageModelInput,
+)
+from langchain_core.load import dumpd, dumps
+from langchain_core.messages import (
+    AIMessage,
+    SystemMessage,
+    AnyMessage,
+    BaseMessage,
+    BaseMessageChunk,
+    HumanMessage,
+    convert_to_messages,
+    message_chunk_to_message,
+)
+from langchain_core.outputs import (
+    ChatGeneration,
+    ChatGenerationChunk,
+    ChatResult,
+    LLMResult,
+    RunInfo,
+)
+from langchain_ollama import ChatOllama
+from langchain_core.output_parsers.base import OutputParserLike
+from langchain_core.runnables import Runnable, RunnableConfig
+from langchain_core.tools import BaseTool
+
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Literal,
+    Optional,
+    Union,
+    cast,
+)
+
+class DeepSeekR1ChatOpenAI(ChatOpenAI):
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        # keep a raw OpenAI client so we can read the DeepSeek-specific
+        # `reasoning_content` field, which LangChain does not surface
+        self.client = OpenAI(
+            base_url=kwargs.get("base_url"),
+            api_key=kwargs.get("api_key")
+        )
+
+    async def ainvoke(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        *,
+        stop: Optional[list[str]] = None,
+        **kwargs: Any,
+    ) -> AIMessage:
+        message_history = []
+        for input_ in input:
+            if isinstance(input_, SystemMessage):
+                message_history.append({"role": "system", "content": input_.content})
+            elif isinstance(input_, AIMessage):
+                message_history.append({"role": "assistant", "content": input_.content})
+            else:
+                message_history.append({"role": "user", "content": input_.content})
+
+        response = self.client.chat.completions.create(
+            model=self.model_name,
+            messages=message_history
+        )
+
+        reasoning_content = response.choices[0].message.reasoning_content
+        content = response.choices[0].message.content
+        return AIMessage(content=content, reasoning_content=reasoning_content)
+
+    def invoke(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        *,
+        stop: Optional[list[str]] = None,
+        **kwargs: Any,
+    ) -> AIMessage:
+        message_history = []
+        for input_ in input:
+            if isinstance(input_, SystemMessage):
+                message_history.append({"role": "system", "content": input_.content})
+            elif isinstance(input_, AIMessage):
+                message_history.append({"role": "assistant", "content": input_.content})
+            else:
+                message_history.append({"role": "user", "content": input_.content})
+
+        response = self.client.chat.completions.create(
+            model=self.model_name,
+            messages=message_history
+        )
+
+        reasoning_content = response.choices[0].message.reasoning_content
+        content = response.choices[0].message.content
+        return AIMessage(content=content, reasoning_content=reasoning_content)
+
+class DeepSeekR1ChatOllama(ChatOllama):
+
+    async def ainvoke(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        *,
+        stop: Optional[list[str]] = None,
+        **kwargs: Any,
+    ) -> AIMessage:
+        org_ai_message = await super().ainvoke(input=input)
+        org_content = org_ai_message.content
+        # DeepSeek-R1 emits its chain-of-thought inside <think>...</think> tags;
+        # split it off into reasoning_content and keep the rest as the answer
+        reasoning_content = org_content.split("</think>")[0].replace("<think>", "")
+        content = org_content.split("</think>")[1]
+        if "**JSON Response:**" in content:
+            content = content.split("**JSON Response:**")[-1]
+        return AIMessage(content=content, reasoning_content=reasoning_content)
+
+    def invoke(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        *,
+        stop: Optional[list[str]] = None,
+        **kwargs: Any,
+    ) -> AIMessage:
+        org_ai_message = super().invoke(input=input)
+        org_content = org_ai_message.content
+        reasoning_content = org_content.split("</think>")[0].replace("<think>", "")
+        content = org_content.split("</think>")[1]
+        if "**JSON Response:**" in content:
+            content = content.split("**JSON Response:**")[-1]
+        return AIMessage(content=content, reasoning_content=reasoning_content)
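A usage sketch for the wrappers above: both return `AIMessage` objects that carry the model's chain-of-thought in `reasoning_content` alongside the final `content`. The endpoint and model name are the DeepSeek defaults from `.env.example`; the API key is a placeholder.

```python
from langchain_core.messages import HumanMessage
from src.utils.llm import DeepSeekR1ChatOpenAI

llm = DeepSeekR1ChatOpenAI(
    model="deepseek-reasoner",
    base_url="https://api.deepseek.com",
    api_key="sk-...",  # placeholder
)
msg = llm.invoke([HumanMessage(content="What is 17 * 23?")])
print(msg.reasoning_content)  # step-by-step reasoning
print(msg.content)            # final answer
```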
src/utils/utils.py ADDED
@@ -0,0 +1,243 @@
1
+ import base64
2
+ import os
3
+ import time
4
+ from pathlib import Path
5
+ from typing import Dict, Optional
6
+ import requests
7
+
8
+ from langchain_anthropic import ChatAnthropic
9
+ from langchain_mistralai import ChatMistralAI
10
+ from langchain_google_genai import ChatGoogleGenerativeAI
11
+ from langchain_ollama import ChatOllama
12
+ from langchain_openai import AzureChatOpenAI, ChatOpenAI
13
+ import gradio as gr
14
+
15
+ from .llm import DeepSeekR1ChatOpenAI, DeepSeekR1ChatOllama
16
+
17
+ PROVIDER_DISPLAY_NAMES = {
18
+ "openai": "OpenAI",
19
+ "azure_openai": "Azure OpenAI",
20
+ "anthropic": "Anthropic",
21
+ "deepseek": "DeepSeek",
22
+ "google": "Google"
23
+ }
24
+
25
+ def get_llm_model(provider: str, **kwargs):
26
+ """
27
+ ่Žทๅ–LLM ๆจกๅž‹
28
+ :param provider: ๆจกๅž‹็ฑปๅž‹
29
+ :param kwargs:
30
+ :return:
31
+ """
32
+ if provider not in ["ollama"]:
33
+ env_var = f"{provider.upper()}_API_KEY"
34
+ api_key = kwargs.get("api_key", "") or os.getenv(env_var, "")
35
+ if not api_key:
36
+ handle_api_key_error(provider, env_var)
37
+ kwargs["api_key"] = api_key
38
+
39
+ if provider == "anthropic":
40
+ if not kwargs.get("base_url", ""):
41
+ base_url = "https://api.anthropic.com"
42
+ else:
43
+ base_url = kwargs.get("base_url")
44
+
45
+ return ChatAnthropic(
46
+ model_name=kwargs.get("model_name", "claude-3-5-sonnet-20240620"),
47
+ temperature=kwargs.get("temperature", 0.0),
48
+ base_url=base_url,
49
+ api_key=api_key,
50
+ )
51
+ elif provider == 'mistral':
52
+ if not kwargs.get("base_url", ""):
53
+ base_url = os.getenv("MISTRAL_ENDPOINT", "https://api.mistral.ai/v1")
54
+ else:
55
+ base_url = kwargs.get("base_url")
56
+ if not kwargs.get("api_key", ""):
57
+ api_key = os.getenv("MISTRAL_API_KEY", "")
58
+ else:
59
+ api_key = kwargs.get("api_key")
60
+
61
+ return ChatMistralAI(
62
+ model=kwargs.get("model_name", "mistral-large-latest"),
63
+ temperature=kwargs.get("temperature", 0.0),
64
+ base_url=base_url,
65
+ api_key=api_key,
66
+ )
67
+ elif provider == "openai":
68
+ if not kwargs.get("base_url", ""):
69
+ base_url = os.getenv("OPENAI_ENDPOINT", "https://api.openai.com/v1")
70
+ else:
71
+ base_url = kwargs.get("base_url")
72
+
73
+ return ChatOpenAI(
74
+ model=kwargs.get("model_name", "gpt-4o"),
75
+ temperature=kwargs.get("temperature", 0.0),
76
+ base_url=base_url,
77
+ api_key=api_key,
78
+ )
79
+ elif provider == "deepseek":
80
+ if not kwargs.get("base_url", ""):
81
+ base_url = os.getenv("DEEPSEEK_ENDPOINT", "")
82
+ else:
83
+ base_url = kwargs.get("base_url")
84
+
85
+ if kwargs.get("model_name", "deepseek-chat") == "deepseek-reasoner":
86
+ return DeepSeekR1ChatOpenAI(
87
+ model=kwargs.get("model_name", "deepseek-reasoner"),
88
+ temperature=kwargs.get("temperature", 0.0),
89
+ base_url=base_url,
90
+ api_key=api_key,
91
+ )
92
+ else:
93
+ return ChatOpenAI(
94
+ model=kwargs.get("model_name", "deepseek-chat"),
95
+ temperature=kwargs.get("temperature", 0.0),
96
+ base_url=base_url,
97
+ api_key=api_key,
98
+ )
99
+ elif provider == "google":
100
+ return ChatGoogleGenerativeAI(
101
+ model=kwargs.get("model_name", "gemini-2.0-flash-exp"),
102
+ temperature=kwargs.get("temperature", 0.0),
103
+ google_api_key=api_key,
104
+ )
105
+ elif provider == "ollama":
106
+ if not kwargs.get("base_url", ""):
107
+ base_url = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
108
+ else:
109
+ base_url = kwargs.get("base_url")
110
+
111
+ if "deepseek-r1" in kwargs.get("model_name", "qwen2.5:7b"):
112
+ return DeepSeekR1ChatOllama(
113
+ model=kwargs.get("model_name", "deepseek-r1:14b"),
114
+ temperature=kwargs.get("temperature", 0.0),
115
+ num_ctx=kwargs.get("num_ctx", 32000),
116
+ base_url=base_url,
117
+ )
118
+ else:
119
+ return ChatOllama(
120
+ model=kwargs.get("model_name", "qwen2.5:7b"),
121
+ temperature=kwargs.get("temperature", 0.0),
122
+ num_ctx=kwargs.get("num_ctx", 32000),
123
+             num_predict=kwargs.get("num_predict", 1024),
+             base_url=base_url,
+         )
+     elif provider == "azure_openai":
+         if not kwargs.get("base_url", ""):
+             base_url = os.getenv("AZURE_OPENAI_ENDPOINT", "")
+         else:
+             base_url = kwargs.get("base_url")
+         api_version = kwargs.get("api_version", "") or os.getenv("AZURE_OPENAI_API_VERSION", "2025-01-01-preview")
+         return AzureChatOpenAI(
+             model=kwargs.get("model_name", "gpt-4o"),
+             temperature=kwargs.get("temperature", 0.0),
+             api_version=api_version,
+             azure_endpoint=base_url,
+             api_key=api_key,
+         )
+     else:
+         raise ValueError(f"Unsupported provider: {provider}")
+
+
+ # Predefined model names for common providers
+ model_names = {
+     "anthropic": ["claude-3-5-sonnet-20240620", "claude-3-opus-20240229"],
+     "openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo", "o3-mini"],
+     "deepseek": ["deepseek-chat", "deepseek-reasoner"],
+     "google": ["gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp", "gemini-1.5-flash-latest", "gemini-1.5-flash-8b-latest", "gemini-2.0-flash-thinking-exp-01-21"],
+     "ollama": ["qwen2.5:7b", "llama2:7b", "deepseek-r1:14b", "deepseek-r1:32b"],
+     "azure_openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo"],
+     "mistral": ["pixtral-large-latest", "mistral-large-latest", "mistral-small-latest", "ministral-8b-latest"],
+ }
+
+
+ # Callback to update the model name dropdown based on the selected provider
+ def update_model_dropdown(llm_provider, api_key=None, base_url=None):
+     """
+     Update the model name dropdown with predefined models for the selected provider.
+     """
+     # Use API keys from .env if not provided
+     if not api_key:
+         api_key = os.getenv(f"{llm_provider.upper()}_API_KEY", "")
+     if not base_url:
+         base_url = os.getenv(f"{llm_provider.upper()}_BASE_URL", "")
+
+     # Use predefined models for the selected provider
+     if llm_provider in model_names:
+         return gr.Dropdown(choices=model_names[llm_provider], value=model_names[llm_provider][0], interactive=True)
+     else:
+         return gr.Dropdown(choices=[], value="", interactive=True, allow_custom_value=True)
+
+
+ def handle_api_key_error(provider: str, env_var: str):
+     """
+     Handle a missing API key by raising a gr.Error with a clear message.
+     """
+     provider_display = PROVIDER_DISPLAY_NAMES.get(provider, provider.upper())
+     raise gr.Error(
+         f"💥 {provider_display} API key not found! 🔑 Please set the "
+         f"`{env_var}` environment variable or provide it in the UI."
+     )
+
+
+ def encode_image(img_path):
+     if not img_path:
+         return None
+     with open(img_path, "rb") as fin:
+         image_data = base64.b64encode(fin.read()).decode("utf-8")
+     return image_data
+
+
+ def get_latest_files(directory: str, file_types: list = ['.webm', '.zip']) -> Dict[str, Optional[str]]:
+     """Get the latest recording and trace files"""
+     latest_files: Dict[str, Optional[str]] = {ext: None for ext in file_types}
+
+     if not os.path.exists(directory):
+         os.makedirs(directory, exist_ok=True)
+         return latest_files
+
+     for file_type in file_types:
+         try:
+             matches = list(Path(directory).rglob(f"*{file_type}"))
+             if matches:
+                 latest = max(matches, key=lambda p: p.stat().st_mtime)
+                 # Only return files that are complete (not still being written)
+                 if time.time() - latest.stat().st_mtime > 1.0:
+                     latest_files[file_type] = str(latest)
+         except Exception as e:
+             print(f"Error getting latest {file_type} file: {e}")
+
+     return latest_files
+
+
+ async def capture_screenshot(browser_context):
+     """Capture and encode a screenshot"""
+     # Extract the Playwright browser instance
+     playwright_browser = browser_context.browser.playwright_browser
+
+     # Check if the browser instance is valid and if an existing context can be reused
+     if playwright_browser and playwright_browser.contexts:
+         playwright_context = playwright_browser.contexts[0]
+     else:
+         return None
+
+     # Access pages in the context
+     pages = None
+     if playwright_context:
+         pages = playwright_context.pages
+
+     # Use an existing page, preferring the last one that has navigated away
+     # from about:blank; bail out if there are no pages at all
+     if pages:
+         active_page = pages[0]
+         for page in pages:
+             if page.url != "about:blank":
+                 active_page = page
+     else:
+         return None
+
+     # Take the screenshot
+     try:
+         screenshot = await active_page.screenshot(
+             type='jpeg',
+             quality=75,
+             scale="css",
+         )
+         encoded = base64.b64encode(screenshot).decode('utf-8')
+         return encoded
+     except Exception:
+         return None
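
A minimal usage sketch for the helpers above, assuming they are importable as src.utils.utils from the project root; the paths below are the defaults used elsewhere in this repo:

    # Sketch only: exercise get_latest_files and encode_image outside the UI.
    from src.utils.utils import encode_image, get_latest_files

    latest = get_latest_files("./tmp/record_videos")  # {'.webm': path or None, '.zip': path or None}
    if latest[".webm"]:
        print(f"Newest finished recording: {latest['.webm']}")

    b64 = encode_image("assets/examples/test.png")  # base64 string, or None for a falsy path
    print("no image" if b64 is None else b64[:32] + "...")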
supervisord.conf ADDED
@@ -0,0 +1,96 @@
+ [supervisord]
+ user=root
+ nodaemon=true
+ logfile=/dev/stdout
+ logfile_maxbytes=0
+ loglevel=debug
+
+ [program:xvfb]
+ command=Xvfb :99 -screen 0 %(ENV_RESOLUTION)s -ac +extension GLX +render -noreset
+ autorestart=true
+ stdout_logfile=/dev/stdout
+ stdout_logfile_maxbytes=0
+ stderr_logfile=/dev/stderr
+ stderr_logfile_maxbytes=0
+ priority=100
+ startsecs=3
+ stopsignal=TERM
+ stopwaitsecs=10
+
+ [program:vnc_setup]
+ command=bash -c "mkdir -p ~/.vnc && echo '%(ENV_VNC_PASSWORD)s' | vncpasswd -f > ~/.vnc/passwd && chmod 600 ~/.vnc/passwd && ls -la ~/.vnc/passwd"
+ autorestart=false
+ startsecs=0
+ priority=150
+ stdout_logfile=/dev/stdout
+ stdout_logfile_maxbytes=0
+ stderr_logfile=/dev/stderr
+ stderr_logfile_maxbytes=0
+
+ [program:x11vnc]
+ command=bash -c "mkdir -p /var/log && touch /var/log/x11vnc.log && chmod 666 /var/log/x11vnc.log && sleep 5 && DISPLAY=:99 x11vnc -display :99 -forever -shared -rfbauth /root/.vnc/passwd -rfbport 5901 -o /var/log/x11vnc.log"
+ autorestart=true
+ stdout_logfile=/dev/stdout
+ stdout_logfile_maxbytes=0
+ stderr_logfile=/dev/stderr
+ stderr_logfile_maxbytes=0
+ priority=200
+ startretries=10
+ startsecs=10
+ stopsignal=TERM
+ stopwaitsecs=10
+ depends_on=vnc_setup,xvfb
+
+ [program:x11vnc_log]
+ command=bash -c "mkdir -p /var/log && touch /var/log/x11vnc.log && tail -f /var/log/x11vnc.log"
+ autorestart=true
+ stdout_logfile=/dev/stdout
+ stdout_logfile_maxbytes=0
+ stderr_logfile=/dev/stderr
+ stderr_logfile_maxbytes=0
+ priority=250
+ stopsignal=TERM
+ stopwaitsecs=5
+ depends_on=x11vnc
+
+ [program:novnc]
+ command=bash -c "sleep 5 && cd /opt/novnc && ./utils/novnc_proxy --vnc localhost:5901 --listen 0.0.0.0:6080 --web /opt/novnc"
+ autorestart=true
+ stdout_logfile=/dev/stdout
+ stdout_logfile_maxbytes=0
+ stderr_logfile=/dev/stderr
+ stderr_logfile_maxbytes=0
+ priority=300
+ startretries=5
+ startsecs=3
+ depends_on=x11vnc
+
+ [program:persistent_browser]
+ environment=START_URL="data:text/html,<html><body><h1>Browser Ready</h1></body></html>"
+ command=bash -c "mkdir -p /app/data/chrome_data && sleep 8 && $(find /ms-playwright/chromium-*/chrome-linux -name chrome) --user-data-dir=/app/data/chrome_data --window-position=0,0 --window-size=%(ENV_RESOLUTION_WIDTH)s,%(ENV_RESOLUTION_HEIGHT)s --start-maximized --no-sandbox --disable-dev-shm-usage --disable-gpu --disable-software-rasterizer --disable-setuid-sandbox --no-first-run --no-default-browser-check --no-experiments --ignore-certificate-errors --remote-debugging-port=9222 --remote-debugging-address=0.0.0.0 \"$START_URL\""
+ autorestart=true
+ stdout_logfile=/dev/stdout
+ stdout_logfile_maxbytes=0
+ stderr_logfile=/dev/stderr
+ stderr_logfile_maxbytes=0
+ priority=350
+ startretries=5
+ startsecs=10
+ stopsignal=TERM
+ stopwaitsecs=15
+ depends_on=novnc
+
+ [program:webui]
+ command=python webui.py --ip 0.0.0.0 --port 7788
+ directory=/app
+ autorestart=true
+ stdout_logfile=/dev/stdout
+ stdout_logfile_maxbytes=0
+ stderr_logfile=/dev/stderr
+ stderr_logfile_maxbytes=0
+ priority=400
+ startretries=3
+ startsecs=3
+ stopsignal=TERM
+ stopwaitsecs=10
+ depends_on=persistent_browser
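
Note that stock supervisord sequences startup by ascending priority (plus startsecs); `depends_on` is not among its documented program options, so the effective ordering here comes from the priorities and the `sleep` guards embedded in each command. Below is a small health-probe sketch for the stack this file defines, checking the ports it exposes (5901 x11vnc, 6080 noVNC, 9222 Chrome CDP, 7788 web UI); it assumes it runs inside the container:

    import socket

    # Ports taken from the config above; adjust if the conf changes.
    for name, port in [("x11vnc", 5901), ("novnc", 6080), ("chrome-cdp", 9222), ("webui", 7788)]:
        with socket.socket() as s:
            s.settimeout(1.0)
            status = "up" if s.connect_ex(("127.0.0.1", port)) == 0 else "down"
        print(f"{name:10s} :{port} {status}")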
tests/test_browser_use.py ADDED
@@ -0,0 +1,361 @@
+ from dotenv import load_dotenv
+
+ load_dotenv()
+ import sys
+
+ sys.path.append(".")
+ import asyncio
+ import os
+ from pprint import pprint
+
+ from browser_use import Agent
+ from browser_use.agent.views import AgentHistoryList
+
+ from src.utils import utils
+
+
+ async def test_browser_use_org():
+     from browser_use.browser.browser import Browser, BrowserConfig
+     from browser_use.browser.context import (
+         BrowserContextConfig,
+         BrowserContextWindowSize,
+     )
+
+     # llm = utils.get_llm_model(
+     #     provider="azure_openai",
+     #     model_name="gpt-4o",
+     #     temperature=0.8,
+     #     base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
+     #     api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
+     # )
+
+     # llm = utils.get_llm_model(
+     #     provider="deepseek",
+     #     model_name="deepseek-chat",
+     #     temperature=0.8
+     # )
+
+     llm = utils.get_llm_model(
+         provider="ollama", model_name="deepseek-r1:14b", temperature=0.5
+     )
+
+     window_w, window_h = 1920, 1080
+     use_vision = False
+     use_own_browser = False
+     if use_own_browser:
+         chrome_path = os.getenv("CHROME_PATH", None)
+         if chrome_path == "":
+             chrome_path = None
+     else:
+         chrome_path = None
+
+     tool_calling_method = "json_schema"  # use "json_schema" when running Ollama models
+
+     browser = Browser(
+         config=BrowserConfig(
+             headless=False,
+             disable_security=True,
+             chrome_instance_path=chrome_path,
+             extra_chromium_args=[f"--window-size={window_w},{window_h}"],
+         )
+     )
+     async with await browser.new_context(
+         config=BrowserContextConfig(
+             trace_path="./tmp/traces",
+             save_recording_path="./tmp/record_videos",
+             no_viewport=False,
+             browser_window_size=BrowserContextWindowSize(
+                 width=window_w, height=window_h
+             ),
+         )
+     ) as browser_context:
+         agent = Agent(
+             task="go to google.com and type 'OpenAI' click search and give me the first url",
+             llm=llm,
+             browser_context=browser_context,
+             use_vision=use_vision,
+             tool_calling_method=tool_calling_method
+         )
+         history: AgentHistoryList = await agent.run(max_steps=10)
+
+         print("Final Result:")
+         pprint(history.final_result(), indent=4)
+
+         print("\nErrors:")
+         pprint(history.errors(), indent=4)
+
+         # e.g. xPaths the model clicked on
+         print("\nModel Outputs:")
+         pprint(history.model_actions(), indent=4)
+
+         print("\nThoughts:")
+         pprint(history.model_thoughts(), indent=4)
+     # close browser
+     await browser.close()
+
+
+ async def test_browser_use_custom():
+     from browser_use.browser.context import BrowserContextWindowSize
+     from browser_use.browser.browser import BrowserConfig
+     from playwright.async_api import async_playwright
+
+     from src.agent.custom_agent import CustomAgent
+     from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
+     from src.browser.custom_browser import CustomBrowser
+     from src.browser.custom_context import BrowserContextConfig
+     from src.controller.custom_controller import CustomController
+
+     window_w, window_h = 1920, 1080
+
+     # llm = utils.get_llm_model(
+     #     provider="openai",
+     #     model_name="gpt-4o",
+     #     temperature=0.8,
+     #     base_url=os.getenv("OPENAI_ENDPOINT", ""),
+     #     api_key=os.getenv("OPENAI_API_KEY", ""),
+     # )
+
+     llm = utils.get_llm_model(
+         provider="azure_openai",
+         model_name="gpt-4o",
+         temperature=0.8,
+         base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
+         api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
+     )
+
+     # llm = utils.get_llm_model(
+     #     provider="google",
+     #     model_name="gemini-2.0-flash-exp",
+     #     temperature=1.0,
+     #     api_key=os.getenv("GOOGLE_API_KEY", "")
+     # )
+
+     # llm = utils.get_llm_model(
+     #     provider="deepseek",
+     #     model_name="deepseek-reasoner",
+     #     temperature=0.8
+     # )
+
+     # llm = utils.get_llm_model(
+     #     provider="deepseek",
+     #     model_name="deepseek-chat",
+     #     temperature=0.8
+     # )
+
+     # llm = utils.get_llm_model(
+     #     provider="ollama", model_name="qwen2.5:7b", temperature=0.5
+     # )
+
+     # llm = utils.get_llm_model(
+     #     provider="ollama", model_name="deepseek-r1:14b", temperature=0.5
+     # )
+
+     controller = CustomController()
+     use_own_browser = True
+     disable_security = True
+     use_vision = False  # Set to False when using DeepSeek
+
+     max_actions_per_step = 1
+     playwright = None
+     browser = None
+     browser_context = None
+
+     try:
+         extra_chromium_args = [f"--window-size={window_w},{window_h}"]
+         if use_own_browser:
+             chrome_path = os.getenv("CHROME_PATH", None)
+             if chrome_path == "":
+                 chrome_path = None
+             chrome_user_data = os.getenv("CHROME_USER_DATA", None)
+             if chrome_user_data:
+                 extra_chromium_args += [f"--user-data-dir={chrome_user_data}"]
+         else:
+             chrome_path = None
+         browser = CustomBrowser(
+             config=BrowserConfig(
+                 headless=False,
+                 disable_security=disable_security,
+                 chrome_instance_path=chrome_path,
+                 extra_chromium_args=extra_chromium_args,
+             )
+         )
+         browser_context = await browser.new_context(
+             config=BrowserContextConfig(
+                 trace_path="./tmp/traces",
+                 save_recording_path="./tmp/record_videos",
+                 no_viewport=False,
+                 browser_window_size=BrowserContextWindowSize(
+                     width=window_w, height=window_h
+                 ),
+             )
+         )
+         agent = CustomAgent(
+             task="Search 'Nvidia' and give me the first url",
+             add_infos="",  # optional hints to help the LLM complete the task
+             llm=llm,
+             browser=browser,
+             browser_context=browser_context,
+             controller=controller,
+             system_prompt_class=CustomSystemPrompt,
+             agent_prompt_class=CustomAgentMessagePrompt,
+             use_vision=use_vision,
+             max_actions_per_step=max_actions_per_step
+         )
+         history: AgentHistoryList = await agent.run(max_steps=100)
+
+         print("Final Result:")
+         pprint(history.final_result(), indent=4)
+
+         print("\nErrors:")
+         pprint(history.errors(), indent=4)
+
+         # e.g. xPaths the model clicked on
+         print("\nModel Outputs:")
+         pprint(history.model_actions(), indent=4)
+
+         print("\nThoughts:")
+         pprint(history.model_thoughts(), indent=4)
+     # close browser
+     except Exception:
+         import traceback
+
+         traceback.print_exc()
+     finally:
+         # Explicitly close the persistent context
+         if browser_context:
+             await browser_context.close()
+
+         # Close the Playwright object
+         if playwright:
+             await playwright.stop()
+         if browser:
+             await browser.close()
+
+
+ async def test_browser_use_parallel():
+     from browser_use.browser.context import BrowserContextWindowSize
+     from browser_use.browser.browser import BrowserConfig
+     from playwright.async_api import async_playwright
+     from browser_use.browser.browser import Browser
+     from src.agent.custom_agent import CustomAgent
+     from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
+     from src.browser.custom_browser import CustomBrowser
+     from src.browser.custom_context import BrowserContextConfig
+     from src.controller.custom_controller import CustomController
+
+     window_w, window_h = 1920, 1080
+
+     # llm = utils.get_llm_model(
+     #     provider="openai",
+     #     model_name="gpt-4o",
+     #     temperature=0.8,
+     #     base_url=os.getenv("OPENAI_ENDPOINT", ""),
+     #     api_key=os.getenv("OPENAI_API_KEY", ""),
+     # )
+
+     # llm = utils.get_llm_model(
+     #     provider="azure_openai",
+     #     model_name="gpt-4o",
+     #     temperature=0.8,
+     #     base_url=os.getenv("AZURE_OPENAI_ENDPOINT", ""),
+     #     api_key=os.getenv("AZURE_OPENAI_API_KEY", ""),
+     # )
+
+     llm = utils.get_llm_model(
+         provider="google",  # "google" is the provider key used by utils.model_names
+         model_name="gemini-2.0-flash-exp",
+         temperature=1.0,
+         api_key=os.getenv("GOOGLE_API_KEY", "")
+     )
+
+     # llm = utils.get_llm_model(
+     #     provider="deepseek",
+     #     model_name="deepseek-reasoner",
+     #     temperature=0.8
+     # )
+
+     # llm = utils.get_llm_model(
+     #     provider="deepseek",
+     #     model_name="deepseek-chat",
+     #     temperature=0.8
+     # )
+
+     # llm = utils.get_llm_model(
+     #     provider="ollama", model_name="qwen2.5:7b", temperature=0.5
+     # )
+
+     # llm = utils.get_llm_model(
+     #     provider="ollama", model_name="deepseek-r1:14b", temperature=0.5
+     # )
+
+     controller = CustomController()
+     use_own_browser = True
+     disable_security = True
+     use_vision = True  # Set to False when using DeepSeek
+
+     max_actions_per_step = 1
+     playwright = None
+     browser = None
+     browser_context = None
+
+     browser = Browser(
+         config=BrowserConfig(
+             disable_security=True,
+             headless=False,
+             new_context_config=BrowserContextConfig(save_recording_path='./tmp/recordings'),
+         )
+     )
+
+     try:
+         agents = [
+             Agent(task=task, llm=llm, browser=browser)
+             for task in [
+                 'Search Google for weather in Tokyo',
+                 'Check Reddit front page title',
+                 '大S去世',  # Chinese query: "Big S passed away"
+                 'Find NASA image of the day',
+                 # 'Check top story on CNN',
+                 # 'Search latest SpaceX launch date',
+                 # 'Look up population of Paris',
+                 # 'Find current time in Sydney',
+                 # 'Check who won last Super Bowl',
+                 # 'Search trending topics on Twitter',
+             ]
+         ]
+
+         # asyncio.gather returns one history per agent, so report on each in turn
+         histories = await asyncio.gather(*[agent.run() for agent in agents])
+         for history in histories:
+             print("Final Result:")
+             pprint(history.final_result(), indent=4)
+
+             print("\nErrors:")
+             pprint(history.errors(), indent=4)
+
+             # e.g. xPaths the model clicked on
+             print("\nModel Outputs:")
+             pprint(history.model_actions(), indent=4)
+
+             print("\nThoughts:")
+             pprint(history.model_thoughts(), indent=4)
+     # close browser
+     except Exception:
+         import traceback
+
+         traceback.print_exc()
+     finally:
+         # Explicitly close the persistent context
+         if browser_context:
+             await browser_context.close()
+
+         # Close the Playwright object
+         if playwright:
+             await playwright.stop()
+         if browser:
+             await browser.close()
+
+
+ if __name__ == "__main__":
+     # asyncio.run(test_browser_use_org())
+     # asyncio.run(test_browser_use_parallel())
+     asyncio.run(test_browser_use_custom())
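
These tests are plain asyncio scripts guarded by `__main__`, so they are run directly with `python tests/test_browser_use.py` rather than collected by pytest. A sketch of the Ollama path the org test exercises, with the endpoint taken from .env (OLLAMA_ENDPOINT, default http://localhost:11434); note the org test pairs Ollama models with tool_calling_method="json_schema":

    import os
    from src.utils import utils

    # Sketch: build the same Ollama-backed LLM the test uses; base_url can be
    # omitted when Ollama listens on the default endpoint.
    llm = utils.get_llm_model(
        provider="ollama",
        model_name="deepseek-r1:14b",
        temperature=0.5,
        base_url=os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434"),
    )
    print(llm.invoke("Reply with the single word: ready").content)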
tests/test_deep_research.py ADDED
@@ -0,0 +1,30 @@
+ import asyncio
+ import os
+ from dotenv import load_dotenv
+
+ load_dotenv()
+ import sys
+
+ sys.path.append(".")
+
+
+ async def test_deep_research():
+     from src.utils.deep_research import deep_research
+     from src.utils import utils
+
+     task = "write a report about DeepSeek-R1, get its pdf"
+     llm = utils.get_llm_model(
+         provider="google",  # "google" is the provider key used by utils.model_names
+         model_name="gemini-2.0-flash-thinking-exp-01-21",
+         temperature=1.0,
+         api_key=os.getenv("GOOGLE_API_KEY", "")
+     )
+
+     report_content, report_file_path = await deep_research(task=task, llm=llm, agent_state=None,
+                                                            max_search_iterations=1,
+                                                            max_query_num=3,
+                                                            use_own_browser=False)
+     print(report_file_path)
+
+
+ if __name__ == "__main__":
+     asyncio.run(test_deep_research())
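
A short sketch of consuming the return values inside the async test above; deep_research returns a (markdown_content, file_path) pair, which is also how run_deep_search in webui.py uses it:

    # Sketch: save the generated markdown alongside the path deep_research reports.
    report_md, report_path = await deep_research(task=task, llm=llm, agent_state=None,
                                                 max_search_iterations=1,
                                                 max_query_num=3,
                                                 use_own_browser=False)
    with open("./tmp/deep_research_report.md", "w", encoding="utf-8") as f:
        f.write(report_md or "")
    print(report_path)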
tests/test_llm_api.py ADDED
@@ -0,0 +1,132 @@
+ import os
+ import pdb
+ from dataclasses import dataclass
+ from typing import Optional
+
+ from dotenv import load_dotenv
+ from langchain_core.messages import HumanMessage, SystemMessage
+ from langchain_ollama import ChatOllama
+
+ load_dotenv()
+
+ import sys
+
+ sys.path.append(".")
+
+
+ @dataclass
+ class LLMConfig:
+     provider: str
+     model_name: str
+     temperature: float = 0.8
+     base_url: Optional[str] = None
+     api_key: Optional[str] = None
+
+
+ def create_message_content(text, image_path=None):
+     content = [{"type": "text", "text": text}]
+
+     if image_path:
+         from src.utils import utils
+         image_data = utils.encode_image(image_path)
+         content.append({
+             "type": "image_url",
+             "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}
+         })
+
+     return content
+
+
+ def get_env_value(key, provider):
+     env_mappings = {
+         "openai": {"api_key": "OPENAI_API_KEY", "base_url": "OPENAI_ENDPOINT"},
+         "azure_openai": {"api_key": "AZURE_OPENAI_API_KEY", "base_url": "AZURE_OPENAI_ENDPOINT"},
+         "google": {"api_key": "GOOGLE_API_KEY"},
+         "deepseek": {"api_key": "DEEPSEEK_API_KEY", "base_url": "DEEPSEEK_ENDPOINT"},
+         "mistral": {"api_key": "MISTRAL_API_KEY", "base_url": "MISTRAL_ENDPOINT"},
+     }
+
+     if provider in env_mappings and key in env_mappings[provider]:
+         return os.getenv(env_mappings[provider][key], "")
+     return ""
+
+
+ def test_llm(config, query, image_path=None, system_message=None):
+     from src.utils import utils
+
+     # Special handling for Ollama-based models
+     if config.provider == "ollama":
+         if "deepseek-r1" in config.model_name:
+             from src.utils.llm import DeepSeekR1ChatOllama
+             llm = DeepSeekR1ChatOllama(model=config.model_name)
+         else:
+             llm = ChatOllama(model=config.model_name)
+
+         ai_msg = llm.invoke(query)
+         print(ai_msg.content)
+         if "deepseek-r1" in config.model_name:
+             pdb.set_trace()  # drop into the debugger to inspect the reasoning output
+         return
+
+     # For other providers, use the standard configuration
+     llm = utils.get_llm_model(
+         provider=config.provider,
+         model_name=config.model_name,
+         temperature=config.temperature,
+         base_url=config.base_url or get_env_value("base_url", config.provider),
+         api_key=config.api_key or get_env_value("api_key", config.provider)
+     )
+
+     # Prepare messages for non-Ollama models
+     messages = []
+     if system_message:
+         messages.append(SystemMessage(content=create_message_content(system_message)))
+     messages.append(HumanMessage(content=create_message_content(query, image_path)))
+     ai_msg = llm.invoke(messages)
+
+     # Handle different response types
+     if hasattr(ai_msg, "reasoning_content"):
+         print(ai_msg.reasoning_content)
+     print(ai_msg.content)
+
+     if config.provider == "deepseek" and "deepseek-reasoner" in config.model_name:
+         print(llm.model_name)
+         pdb.set_trace()  # drop into the debugger to inspect the reasoning output
+
+
+ def test_openai_model():
+     config = LLMConfig(provider="openai", model_name="gpt-4o")
+     test_llm(config, "Describe this image", "assets/examples/test.png")
+
+
+ def test_google_model():
+     # Enable your API key first if you haven't: https://ai.google.dev/palm_docs/oauth_quickstart
+     config = LLMConfig(provider="google", model_name="gemini-2.0-flash-exp")
+     test_llm(config, "Describe this image", "assets/examples/test.png")
+
+
+ def test_azure_openai_model():
+     config = LLMConfig(provider="azure_openai", model_name="gpt-4o")
+     test_llm(config, "Describe this image", "assets/examples/test.png")
+
+
+ def test_deepseek_model():
+     config = LLMConfig(provider="deepseek", model_name="deepseek-chat")
+     test_llm(config, "Who are you?")
+
+
+ def test_deepseek_r1_model():
+     config = LLMConfig(provider="deepseek", model_name="deepseek-reasoner")
+     test_llm(config, "Which is greater, 9.11 or 9.8?", system_message="You are a helpful AI assistant.")
+
+
+ def test_ollama_model():
+     config = LLMConfig(provider="ollama", model_name="qwen2.5:7b")
+     test_llm(config, "Sing a ballad of LangChain.")
+
+
+ def test_deepseek_r1_ollama_model():
+     config = LLMConfig(provider="ollama", model_name="deepseek-r1:14b")
+     test_llm(config, "How many 'r's are in the word 'strawberry'?")
+
+
+ def test_mistral_model():
+     config = LLMConfig(provider="mistral", model_name="pixtral-large-latest")
+     test_llm(config, "Describe this image", "assets/examples/test.png")
+
+
+ if __name__ == "__main__":
+     # test_openai_model()
+     # test_google_model()
+     # test_azure_openai_model()
+     # test_deepseek_model()
+     # test_ollama_model()
+     test_deepseek_r1_model()
+     # test_deepseek_r1_ollama_model()
+     # test_mistral_model()
tests/test_playwright.py ADDED
@@ -0,0 +1,31 @@
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+
+ def test_connect_browser():
+     import os
+     from playwright.sync_api import sync_playwright
+
+     chrome_exe = os.getenv("CHROME_PATH", "")
+     chrome_user_data = os.getenv("CHROME_USER_DATA", "")
+
+     with sync_playwright() as p:
+         browser = p.chromium.launch_persistent_context(
+             user_data_dir=chrome_user_data,
+             executable_path=chrome_exe,
+             headless=False  # Keep the browser window visible
+         )
+
+         page = browser.new_page()
+         page.goto("https://mail.google.com/mail/u/0/#inbox")
+         page.wait_for_load_state()
+
+         input("Press the Enter key to close the browser...")
+
+         browser.close()
+
+
+ if __name__ == '__main__':
+     test_connect_browser()
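
launch_persistent_context above spawns a fresh browser from a local Chrome install; when the Docker stack from supervisord.conf is running, one can instead attach to the already-running persistent_browser over CDP. A sketch, assuming the debugging port from .env (CHROME_DEBUGGING_PORT=9222):

    from playwright.sync_api import sync_playwright

    # Sketch: attach to the existing Chrome started with --remote-debugging-port=9222.
    with sync_playwright() as p:
        browser = p.chromium.connect_over_cdp("http://localhost:9222")
        context = browser.contexts[0] if browser.contexts else browser.new_context()
        page = context.new_page()
        page.goto("https://example.com")
        print(page.title())
        browser.close()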
webui.py ADDED
@@ -0,0 +1,1073 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pdb
2
+ import logging
3
+
4
+ from dotenv import load_dotenv
5
+
6
+ load_dotenv()
7
+ import os
8
+ import glob
9
+ import asyncio
10
+ import argparse
11
+ import os
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+ import gradio as gr
16
+ import random
17
+
18
+ from browser_use.agent.service import Agent
19
+ from playwright.async_api import async_playwright
20
+ from browser_use.browser.browser import Browser, BrowserConfig
21
+ from browser_use.browser.context import (
22
+ BrowserContextConfig,
23
+ BrowserContextWindowSize,
24
+ )
25
+ from langchain_ollama import ChatOllama
26
+ from playwright.async_api import async_playwright
27
+ from src.utils.agent_state import AgentState
28
+
29
+ from src.utils import utils
30
+ from src.agent.custom_agent import CustomAgent
31
+ from src.browser.custom_browser import CustomBrowser
32
+ from src.agent.custom_prompts import CustomSystemPrompt, CustomAgentMessagePrompt
33
+ from src.browser.custom_context import BrowserContextConfig, CustomBrowserContext
34
+ from src.controller.custom_controller import CustomController
35
+ from gradio.themes import Citrus, Default, Glass, Monochrome, Ocean, Origin, Soft, Base
36
+ from src.utils.default_config_settings import default_config, load_config_from_file, save_config_to_file, save_current_config, update_ui_from_config
37
+ from src.utils.utils import update_model_dropdown, get_latest_files, capture_screenshot
38
+
39
+
40
+ # Global variables for persistence
41
+ _global_browser = None
42
+ _global_browser_context = None
43
+
44
+ # Create the global agent state instance
45
+ _global_agent_state = AgentState()
46
+
47
+ async def stop_agent():
48
+ """Request the agent to stop and update UI with enhanced feedback"""
49
+ global _global_agent_state, _global_browser_context, _global_browser
50
+
51
+ try:
52
+ # Request stop
53
+ _global_agent_state.request_stop()
54
+
55
+ # Update UI immediately
56
+ message = "Stop requested - the agent will halt at the next safe point"
57
+ logger.info(f"๐Ÿ›‘ {message}")
58
+
59
+ # Return UI updates
60
+ return (
61
+ message, # errors_output
62
+ gr.update(value="Stopping...", interactive=False), # stop_button
63
+ gr.update(interactive=False), # run_button
64
+ )
65
+ except Exception as e:
66
+ error_msg = f"Error during stop: {str(e)}"
67
+ logger.error(error_msg)
68
+ return (
69
+ error_msg,
70
+ gr.update(value="Stop", interactive=True),
71
+ gr.update(interactive=True)
72
+ )
73
+
74
+ async def stop_research_agent():
75
+ """Request the agent to stop and update UI with enhanced feedback"""
76
+ global _global_agent_state, _global_browser_context, _global_browser
77
+
78
+ try:
79
+ # Request stop
80
+ _global_agent_state.request_stop()
81
+
82
+ # Update UI immediately
83
+ message = "Stop requested - the agent will halt at the next safe point"
84
+ logger.info(f"๐Ÿ›‘ {message}")
85
+
86
+ # Return UI updates
87
+ return ( # errors_output
88
+ gr.update(value="Stopping...", interactive=False), # stop_button
89
+ gr.update(interactive=False), # run_button
90
+ )
91
+ except Exception as e:
92
+ error_msg = f"Error during stop: {str(e)}"
93
+ logger.error(error_msg)
94
+ return (
95
+ gr.update(value="Stop", interactive=True),
96
+ gr.update(interactive=True)
97
+ )
98
+
99
+ async def run_browser_agent(
100
+ agent_type,
101
+ llm_provider,
102
+ llm_model_name,
103
+ llm_temperature,
104
+ llm_base_url,
105
+ llm_api_key,
106
+ use_own_browser,
107
+ keep_browser_open,
108
+ headless,
109
+ disable_security,
110
+ window_w,
111
+ window_h,
112
+ save_recording_path,
113
+ save_agent_history_path,
114
+ save_trace_path,
115
+ enable_recording,
116
+ task,
117
+ add_infos,
118
+ max_steps,
119
+ use_vision,
120
+ max_actions_per_step,
121
+ tool_calling_method
122
+ ):
123
+ global _global_agent_state
124
+ _global_agent_state.clear_stop() # Clear any previous stop requests
125
+
126
+ try:
127
+ # Disable recording if the checkbox is unchecked
128
+ if not enable_recording:
129
+ save_recording_path = None
130
+
131
+ # Ensure the recording directory exists if recording is enabled
132
+ if save_recording_path:
133
+ os.makedirs(save_recording_path, exist_ok=True)
134
+
135
+ # Get the list of existing videos before the agent runs
136
+ existing_videos = set()
137
+ if save_recording_path:
138
+ existing_videos = set(
139
+ glob.glob(os.path.join(save_recording_path, "*.[mM][pP]4"))
140
+ + glob.glob(os.path.join(save_recording_path, "*.[wW][eE][bB][mM]"))
141
+ )
142
+
143
+ # Run the agent
144
+ llm = utils.get_llm_model(
145
+ provider=llm_provider,
146
+ model_name=llm_model_name,
147
+ temperature=llm_temperature,
148
+ base_url=llm_base_url,
149
+ api_key=llm_api_key,
150
+ )
151
+ if agent_type == "org":
152
+ final_result, errors, model_actions, model_thoughts, trace_file, history_file = await run_org_agent(
153
+ llm=llm,
154
+ use_own_browser=use_own_browser,
155
+ keep_browser_open=keep_browser_open,
156
+ headless=headless,
157
+ disable_security=disable_security,
158
+ window_w=window_w,
159
+ window_h=window_h,
160
+ save_recording_path=save_recording_path,
161
+ save_agent_history_path=save_agent_history_path,
162
+ save_trace_path=save_trace_path,
163
+ task=task,
164
+ max_steps=max_steps,
165
+ use_vision=use_vision,
166
+ max_actions_per_step=max_actions_per_step,
167
+ tool_calling_method=tool_calling_method
168
+ )
169
+ elif agent_type == "custom":
170
+ final_result, errors, model_actions, model_thoughts, trace_file, history_file = await run_custom_agent(
171
+ llm=llm,
172
+ use_own_browser=use_own_browser,
173
+ keep_browser_open=keep_browser_open,
174
+ headless=headless,
175
+ disable_security=disable_security,
176
+ window_w=window_w,
177
+ window_h=window_h,
178
+ save_recording_path=save_recording_path,
179
+ save_agent_history_path=save_agent_history_path,
180
+ save_trace_path=save_trace_path,
181
+ task=task,
182
+ add_infos=add_infos,
183
+ max_steps=max_steps,
184
+ use_vision=use_vision,
185
+ max_actions_per_step=max_actions_per_step,
186
+ tool_calling_method=tool_calling_method
187
+ )
188
+ else:
189
+ raise ValueError(f"Invalid agent type: {agent_type}")
190
+
191
+ # Get the list of videos after the agent runs (if recording is enabled)
192
+ latest_video = None
193
+ if save_recording_path:
194
+ new_videos = set(
195
+ glob.glob(os.path.join(save_recording_path, "*.[mM][pP]4"))
196
+ + glob.glob(os.path.join(save_recording_path, "*.[wW][eE][bB][mM]"))
197
+ )
198
+ if new_videos - existing_videos:
199
+ latest_video = list(new_videos - existing_videos)[0] # Get the first new video
200
+
201
+ return (
202
+ final_result,
203
+ errors,
204
+ model_actions,
205
+ model_thoughts,
206
+ latest_video,
207
+ trace_file,
208
+ history_file,
209
+ gr.update(value="Stop", interactive=True), # Re-enable stop button
210
+ gr.update(interactive=True) # Re-enable run button
211
+ )
212
+
213
+ except gr.Error:
214
+ raise
215
+
216
+ except Exception as e:
217
+ import traceback
218
+ traceback.print_exc()
219
+ errors = str(e) + "\n" + traceback.format_exc()
220
+ return (
221
+ '', # final_result
222
+ errors, # errors
223
+ '', # model_actions
224
+ '', # model_thoughts
225
+ None, # latest_video
226
+ None, # history_file
227
+ None, # trace_file
228
+ gr.update(value="Stop", interactive=True), # Re-enable stop button
229
+ gr.update(interactive=True) # Re-enable run button
230
+ )
231
+
232
+
233
+ async def run_org_agent(
234
+ llm,
235
+ use_own_browser,
236
+ keep_browser_open,
237
+ headless,
238
+ disable_security,
239
+ window_w,
240
+ window_h,
241
+ save_recording_path,
242
+ save_agent_history_path,
243
+ save_trace_path,
244
+ task,
245
+ max_steps,
246
+ use_vision,
247
+ max_actions_per_step,
248
+ tool_calling_method
249
+ ):
250
+ try:
251
+ global _global_browser, _global_browser_context, _global_agent_state
252
+
253
+ # Clear any previous stop request
254
+ _global_agent_state.clear_stop()
255
+
256
+ extra_chromium_args = [f"--window-size={window_w},{window_h}"]
257
+ if use_own_browser:
258
+ chrome_path = os.getenv("CHROME_PATH", None)
259
+ if chrome_path == "":
260
+ chrome_path = None
261
+ chrome_user_data = os.getenv("CHROME_USER_DATA", None)
262
+ if chrome_user_data:
263
+ extra_chromium_args += [f"--user-data-dir={chrome_user_data}"]
264
+ else:
265
+ chrome_path = None
266
+
267
+ if _global_browser is None:
268
+ _global_browser = Browser(
269
+ config=BrowserConfig(
270
+ headless=headless,
271
+ disable_security=disable_security,
272
+ chrome_instance_path=chrome_path,
273
+ extra_chromium_args=extra_chromium_args,
274
+ )
275
+ )
276
+
277
+ if _global_browser_context is None:
278
+ _global_browser_context = await _global_browser.new_context(
279
+ config=BrowserContextConfig(
280
+ trace_path=save_trace_path if save_trace_path else None,
281
+ save_recording_path=save_recording_path if save_recording_path else None,
282
+ no_viewport=False,
283
+ browser_window_size=BrowserContextWindowSize(
284
+ width=window_w, height=window_h
285
+ ),
286
+ )
287
+ )
288
+
289
+ agent = Agent(
290
+ task=task,
291
+ llm=llm,
292
+ use_vision=use_vision,
293
+ browser=_global_browser,
294
+ browser_context=_global_browser_context,
295
+ max_actions_per_step=max_actions_per_step,
296
+ tool_calling_method=tool_calling_method
297
+ )
298
+ history = await agent.run(max_steps=max_steps)
299
+
300
+ history_file = os.path.join(save_agent_history_path, f"{agent.agent_id}.json")
301
+ agent.save_history(history_file)
302
+
303
+ final_result = history.final_result()
304
+ errors = history.errors()
305
+ model_actions = history.model_actions()
306
+ model_thoughts = history.model_thoughts()
307
+
308
+ trace_file = get_latest_files(save_trace_path)
309
+
310
+ return final_result, errors, model_actions, model_thoughts, trace_file.get('.zip'), history_file
311
+ except Exception as e:
312
+ import traceback
313
+ traceback.print_exc()
314
+ errors = str(e) + "\n" + traceback.format_exc()
315
+ return '', errors, '', '', None, None
316
+ finally:
317
+ # Handle cleanup based on persistence configuration
318
+ if not keep_browser_open:
319
+ if _global_browser_context:
320
+ await _global_browser_context.close()
321
+ _global_browser_context = None
322
+
323
+ if _global_browser:
324
+ await _global_browser.close()
325
+ _global_browser = None
326
+
327
+ async def run_custom_agent(
328
+ llm,
329
+ use_own_browser,
330
+ keep_browser_open,
331
+ headless,
332
+ disable_security,
333
+ window_w,
334
+ window_h,
335
+ save_recording_path,
336
+ save_agent_history_path,
337
+ save_trace_path,
338
+ task,
339
+ add_infos,
340
+ max_steps,
341
+ use_vision,
342
+ max_actions_per_step,
343
+ tool_calling_method
344
+ ):
345
+ try:
346
+ global _global_browser, _global_browser_context, _global_agent_state
347
+
348
+ # Clear any previous stop request
349
+ _global_agent_state.clear_stop()
350
+
351
+ extra_chromium_args = [f"--window-size={window_w},{window_h}"]
352
+ if use_own_browser:
353
+ chrome_path = os.getenv("CHROME_PATH", None)
354
+ if chrome_path == "":
355
+ chrome_path = None
356
+ chrome_user_data = os.getenv("CHROME_USER_DATA", None)
357
+ if chrome_user_data:
358
+ extra_chromium_args += [f"--user-data-dir={chrome_user_data}"]
359
+ else:
360
+ chrome_path = None
361
+
362
+ controller = CustomController()
363
+
364
+ # Initialize global browser if needed
365
+ if _global_browser is None:
366
+ _global_browser = CustomBrowser(
367
+ config=BrowserConfig(
368
+ headless=headless,
369
+ disable_security=disable_security,
370
+ chrome_instance_path=chrome_path,
371
+ extra_chromium_args=extra_chromium_args,
372
+ )
373
+ )
374
+
375
+ if _global_browser_context is None:
376
+ _global_browser_context = await _global_browser.new_context(
377
+ config=BrowserContextConfig(
378
+ trace_path=save_trace_path if save_trace_path else None,
379
+ save_recording_path=save_recording_path if save_recording_path else None,
380
+ no_viewport=False,
381
+ browser_window_size=BrowserContextWindowSize(
382
+ width=window_w, height=window_h
383
+ ),
384
+ )
385
+ )
386
+
387
+ # Create and run agent
388
+ agent = CustomAgent(
389
+ task=task,
390
+ add_infos=add_infos,
391
+ use_vision=use_vision,
392
+ llm=llm,
393
+ browser=_global_browser,
394
+ browser_context=_global_browser_context,
395
+ controller=controller,
396
+ system_prompt_class=CustomSystemPrompt,
397
+ agent_prompt_class=CustomAgentMessagePrompt,
398
+ max_actions_per_step=max_actions_per_step,
399
+ agent_state=_global_agent_state,
400
+ tool_calling_method=tool_calling_method
401
+ )
402
+ history = await agent.run(max_steps=max_steps)
403
+
404
+ history_file = os.path.join(save_agent_history_path, f"{agent.agent_id}.json")
405
+ agent.save_history(history_file)
406
+
407
+ final_result = history.final_result()
408
+ errors = history.errors()
409
+ model_actions = history.model_actions()
410
+ model_thoughts = history.model_thoughts()
411
+
412
+ trace_file = get_latest_files(save_trace_path)
413
+
414
+ return final_result, errors, model_actions, model_thoughts, trace_file.get('.zip'), history_file
415
+ except Exception as e:
416
+ import traceback
417
+ traceback.print_exc()
418
+ errors = str(e) + "\n" + traceback.format_exc()
419
+ return '', errors, '', '', None, None
420
+ finally:
421
+ # Handle cleanup based on persistence configuration
422
+ if not keep_browser_open:
423
+ if _global_browser_context:
424
+ await _global_browser_context.close()
425
+ _global_browser_context = None
426
+
427
+ if _global_browser:
428
+ await _global_browser.close()
429
+ _global_browser = None
430
+
431
+ async def run_with_stream(
432
+ agent_type,
433
+ llm_provider,
434
+ llm_model_name,
435
+ llm_temperature,
436
+ llm_base_url,
437
+ llm_api_key,
438
+ use_own_browser,
439
+ keep_browser_open,
440
+ headless,
441
+ disable_security,
442
+ window_w,
443
+ window_h,
444
+ save_recording_path,
445
+ save_agent_history_path,
446
+ save_trace_path,
447
+ enable_recording,
448
+ task,
449
+ add_infos,
450
+ max_steps,
451
+ use_vision,
452
+ max_actions_per_step,
453
+ tool_calling_method
454
+ ):
455
+ global _global_agent_state
456
+ stream_vw = 80
457
+ stream_vh = int(80 * window_h // window_w)
458
+ if not headless:
459
+ result = await run_browser_agent(
460
+ agent_type=agent_type,
461
+ llm_provider=llm_provider,
462
+ llm_model_name=llm_model_name,
463
+ llm_temperature=llm_temperature,
464
+ llm_base_url=llm_base_url,
465
+ llm_api_key=llm_api_key,
466
+ use_own_browser=use_own_browser,
467
+ keep_browser_open=keep_browser_open,
468
+ headless=headless,
469
+ disable_security=disable_security,
470
+ window_w=window_w,
471
+ window_h=window_h,
472
+ save_recording_path=save_recording_path,
473
+ save_agent_history_path=save_agent_history_path,
474
+ save_trace_path=save_trace_path,
475
+ enable_recording=enable_recording,
476
+ task=task,
477
+ add_infos=add_infos,
478
+ max_steps=max_steps,
479
+ use_vision=use_vision,
480
+ max_actions_per_step=max_actions_per_step,
481
+ tool_calling_method=tool_calling_method
482
+ )
483
+ # Add HTML content at the start of the result array
484
+ html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Using browser...</h1>"
485
+ yield [html_content] + list(result)
486
+ else:
487
+ try:
488
+ _global_agent_state.clear_stop()
489
+ # Run the browser agent in the background
490
+ agent_task = asyncio.create_task(
491
+ run_browser_agent(
492
+ agent_type=agent_type,
493
+ llm_provider=llm_provider,
494
+ llm_model_name=llm_model_name,
495
+ llm_temperature=llm_temperature,
496
+ llm_base_url=llm_base_url,
497
+ llm_api_key=llm_api_key,
498
+ use_own_browser=use_own_browser,
499
+ keep_browser_open=keep_browser_open,
500
+ headless=headless,
501
+ disable_security=disable_security,
502
+ window_w=window_w,
503
+ window_h=window_h,
504
+ save_recording_path=save_recording_path,
505
+ save_agent_history_path=save_agent_history_path,
506
+ save_trace_path=save_trace_path,
507
+ enable_recording=enable_recording,
508
+ task=task,
509
+ add_infos=add_infos,
510
+ max_steps=max_steps,
511
+ use_vision=use_vision,
512
+ max_actions_per_step=max_actions_per_step,
513
+ tool_calling_method=tool_calling_method
514
+ )
515
+ )
516
+
517
+ # Initialize values for streaming
518
+ html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Using browser...</h1>"
519
+ final_result = errors = model_actions = model_thoughts = ""
520
+ latest_videos = trace = history_file = None
521
+
522
+
523
+ # Periodically update the stream while the agent task is running
524
+ while not agent_task.done():
525
+ try:
526
+ encoded_screenshot = await capture_screenshot(_global_browser_context)
527
+ if encoded_screenshot is not None:
528
+ html_content = f'<img src="data:image/jpeg;base64,{encoded_screenshot}" style="width:{stream_vw}vw; height:{stream_vh}vh ; border:1px solid #ccc;">'
529
+ else:
530
+ html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Waiting for browser session...</h1>"
531
+ except Exception as e:
532
+ html_content = f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Waiting for browser session...</h1>"
533
+
534
+ if _global_agent_state and _global_agent_state.is_stop_requested():
535
+ yield [
536
+ html_content,
537
+ final_result,
538
+ errors,
539
+ model_actions,
540
+ model_thoughts,
541
+ latest_videos,
542
+ trace,
543
+ history_file,
544
+ gr.update(value="Stopping...", interactive=False), # stop_button
545
+ gr.update(interactive=False), # run_button
546
+ ]
547
+ break
548
+ else:
549
+ yield [
550
+ html_content,
551
+ final_result,
552
+ errors,
553
+ model_actions,
554
+ model_thoughts,
555
+ latest_videos,
556
+ trace,
557
+ history_file,
558
+ gr.update(value="Stop", interactive=True), # Re-enable stop button
559
+ gr.update(interactive=True) # Re-enable run button
560
+ ]
561
+ await asyncio.sleep(0.05)
562
+
563
+ # Once the agent task completes, get the results
564
+ try:
565
+ result = await agent_task
566
+ final_result, errors, model_actions, model_thoughts, latest_videos, trace, history_file, stop_button, run_button = result
567
+ except gr.Error:
568
+ final_result = ""
569
+ model_actions = ""
570
+ model_thoughts = ""
571
+ latest_videos = trace = history_file = None
572
+
573
+ except Exception as e:
574
+ errors = f"Agent error: {str(e)}"
575
+
576
+ yield [
577
+ html_content,
578
+ final_result,
579
+ errors,
580
+ model_actions,
581
+ model_thoughts,
582
+ latest_videos,
583
+ trace,
584
+ history_file,
585
+ stop_button,
586
+ run_button
587
+ ]
588
+
589
+ except Exception as e:
590
+ import traceback
591
+ yield [
592
+ f"<h1 style='width:{stream_vw}vw; height:{stream_vh}vh'>Waiting for browser session...</h1>",
593
+ "",
594
+ f"Error: {str(e)}\n{traceback.format_exc()}",
595
+ "",
596
+ "",
597
+ None,
598
+ None,
599
+ None,
600
+ gr.update(value="Stop", interactive=True), # Re-enable stop button
601
+ gr.update(interactive=True) # Re-enable run button
602
+ ]
603
+
604
+ # Define the theme map globally
605
+ theme_map = {
606
+ "Default": Default(),
607
+ "Soft": Soft(),
608
+ "Monochrome": Monochrome(),
609
+ "Glass": Glass(),
610
+ "Origin": Origin(),
611
+ "Citrus": Citrus(),
612
+ "Ocean": Ocean(),
613
+ "Base": Base()
614
+ }
615
+
616
+ async def close_global_browser():
617
+ global _global_browser, _global_browser_context
618
+
619
+ if _global_browser_context:
620
+ await _global_browser_context.close()
621
+ _global_browser_context = None
622
+
623
+ if _global_browser:
624
+ await _global_browser.close()
625
+ _global_browser = None
626
+
627
+ async def run_deep_search(research_task, max_search_iteration_input, max_query_per_iter_input, llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key, use_vision, use_own_browser, headless):
628
+ from src.utils.deep_research import deep_research
629
+ global _global_agent_state
630
+
631
+ # Clear any previous stop request
632
+ _global_agent_state.clear_stop()
633
+
634
+ llm = utils.get_llm_model(
635
+ provider=llm_provider,
636
+ model_name=llm_model_name,
637
+ temperature=llm_temperature,
638
+ base_url=llm_base_url,
639
+ api_key=llm_api_key,
640
+ )
641
+ markdown_content, file_path = await deep_research(research_task, llm, _global_agent_state,
642
+ max_search_iterations=max_search_iteration_input,
643
+ max_query_num=max_query_per_iter_input,
644
+ use_vision=use_vision,
645
+ headless=headless,
646
+ use_own_browser=use_own_browser
647
+ )
648
+
649
+ return markdown_content, file_path, gr.update(value="Stop", interactive=True), gr.update(interactive=True)
650
+
651
+
652
+
653
+ # Collection of research prompt examples that are useful for general users
654
+ research_prompts = [
655
+ "Explain how I can improve my sleep habits based on recent scientific findings. Include practical tips for different types of sleep problems.",
656
+ "Compare the most effective methods for learning a new language as an adult. What approaches work best for different learning styles?",
657
+ "Analyze the pros and cons of different retirement savings options for someone in their 30s, considering both traditional and newer investment approaches.",
658
+ "Create a comprehensive guide to reducing household food waste, including storage tips, meal planning strategies, and creative ways to use leftovers.",
659
+ "Research and explain the impact of screen time on children's development, with age-appropriate guidelines and alternatives to digital entertainment.",
660
+ "Provide an analysis of how to identify reliable health information online, with specific red flags to watch for and trusted resources to use instead.",
661
+ "Explore effective strategies for managing work-life balance in a remote/hybrid work environment, including boundary-setting techniques.",
662
+ "Research and summarize the environmental impact of common household choices (transportation, food, energy use) and suggest realistic changes with the biggest positive effect.",
663
+ "Create a guide to understanding and improving personal credit scores, including common misconceptions and step-by-step actions to take.",
664
+ "Compare different approaches to stress management based on scientific research, tailored for people with busy schedules and limited free time."
665
+ ]
666
+
667
+ def get_random_prompt():
668
+ """Return a randomly selected prompt from the collection"""
669
+ return random.choice(research_prompts)
670
+
671
+
672
+ def create_ui(config, theme_name="Ocean"):
673
+ css = """
674
+ .gradio-container {
675
+ max-width: 1200px !important;
676
+ margin: auto !important;
677
+ padding-top: 20px !important;
678
+ }
679
+ .header-text {
680
+ text-align: center;
681
+ margin-bottom: 30px;
682
+ }
683
+ .theme-section {
684
+ margin-bottom: 20px;
685
+ padding: 15px;
686
+ border-radius: 10px;
687
+ }
688
+ """
689
+
690
+ with gr.Blocks(
691
+ title="animality.ai", theme=theme_map[theme_name], css=css
692
+ ) as demo:
693
+ with gr.Row():
694
+ gr.Markdown(
695
+ """
696
+ # animality.ai
697
+ ### web surfingโ€” evolved
698
+ """,
699
+ elem_classes=["header-text"],
700
+ )
701
+
702
+ with gr.Tabs() as tabs:
703
+ with gr.TabItem("โš™๏ธ Agent Settings", id=1):
704
+ with gr.Group():
705
+ agent_type = gr.Radio(
706
+ ["org", "custom"],
707
+ label="Agent Type",
708
+ value=config['agent_type'],
709
+ info="Select the type of agent to use",
710
+ )
711
+ with gr.Column():
712
+ max_steps = gr.Slider(
713
+ minimum=1,
714
+ maximum=100,
715
+ value=config['max_steps'],
716
+ step=1,
717
+ label="Max Run Steps",
718
+ info="Maximum number of steps the agent will take",
719
+ )
720
+ max_actions_per_step = gr.Slider(
721
+ minimum=1,
722
+ maximum=10,
723
+ value=config['max_actions_per_step'],
724
+ step=1,
725
+ label="Max Actions per Step",
726
+ info="Maximum number of actions the agent will take per step",
727
+ )
728
+ with gr.Column():
729
+ use_vision = gr.Checkbox(
730
+ label="Use Vision",
731
+ value=config['use_vision'],
732
+ info="Enable visual processing capabilities",
733
+ )
734
+ tool_calling_method = gr.Dropdown(
735
+ label="Tool Calling Method",
736
+ value=config['tool_calling_method'],
737
+ interactive=True,
738
+ allow_custom_value=True, # Allow users to input custom model names
739
+ choices=["auto", "json_schema", "function_calling"],
740
+ info="Tool Calls Funtion Name",
741
+ visible=False
742
+ )
743
+
744
+ with gr.TabItem("๐Ÿ”ง LLM Configuration", id=2):
745
+ with gr.Group():
746
+ llm_provider = gr.Dropdown(
747
+ choices=[provider for provider,model in utils.model_names.items()],
748
+ label="LLM Provider",
749
+ value=config['llm_provider'],
750
+ info="Select your preferred language model provider"
751
+ )
752
+ llm_model_name = gr.Dropdown(
753
+ label="Model Name",
754
+ choices=utils.model_names['openai'],
755
+ value=config['llm_model_name'],
756
+ interactive=True,
757
+ allow_custom_value=True, # Allow users to input custom model names
758
+ info="Select a model from the dropdown or type a custom model name"
759
+ )
760
+ llm_temperature = gr.Slider(
761
+ minimum=0.0,
762
+ maximum=2.0,
763
+ value=config['llm_temperature'],
764
+ step=0.1,
765
+ label="Temperature",
766
+ info="Controls randomness in model outputs"
767
+ )
768
+ with gr.Row():
769
+ llm_base_url = gr.Textbox(
770
+ label="Base URL",
771
+ value=config['llm_base_url'],
772
+ info="API endpoint URL (if required)"
773
+ )
774
+ llm_api_key = gr.Textbox(
775
+ label="API Key",
776
+ type="password",
777
+ value=config['llm_api_key'],
778
+ info="Your API key (leave blank to use .env)"
779
+ )
780
+
781
+ with gr.TabItem("๐ŸŒ Window Settings", id=3):
782
+ with gr.Group():
783
+ with gr.Row():
784
+ use_own_browser = gr.Checkbox(
785
+ label="Use Own Browser",
786
+ value=config['use_own_browser'],
787
+ info="Use your existing browser instance",
788
+ )
789
+ keep_browser_open = gr.Checkbox(
790
+ label="Keep Browser Open",
791
+ value=config['keep_browser_open'],
792
+ info="Keep Browser Open between Tasks",
793
+ )
794
+ headless = gr.Checkbox(
795
+ label="Headless Mode",
796
+ value=config['headless'],
797
+ info="Run browser without GUI",
798
+ )
799
+ disable_security = gr.Checkbox(
800
+ label="Disable Security",
801
+ value=config['disable_security'],
802
+ info="Disable browser security features",
803
+ )
804
+ enable_recording = gr.Checkbox(
805
+ label="Enable Recording",
806
+ value=config['enable_recording'],
807
+ info="Enable saving browser recordings",
808
+ )
809
+
810
+ with gr.Row():
811
+ window_w = gr.Number(
812
+ label="Window Width",
813
+ value=config['window_w'],
814
+ info="Browser window width",
815
+ )
816
+ window_h = gr.Number(
817
+ label="Window Height",
818
+ value=config['window_h'],
819
+ info="Browser window height",
820
+ )
821
+
822
+ save_recording_path = gr.Textbox(
823
+ label="Recording Path",
824
+ placeholder="e.g. ./tmp/record_videos",
825
+ value=config['save_recording_path'],
826
+ info="Path to save browser recordings",
827
+ interactive=True, # Allow editing only if recording is enabled
828
+ )
829
+
830
+ save_trace_path = gr.Textbox(
831
+ label="Trace Path",
832
+ placeholder="e.g. ./tmp/traces",
833
+ value=config['save_trace_path'],
834
+ info="Path to save Agent traces",
835
+ interactive=True,
836
+ )
837
+
838
+ save_agent_history_path = gr.Textbox(
839
+ label="Agent History Save Path",
840
+ placeholder="e.g., ./tmp/agent_history",
841
+ value=config['save_agent_history_path'],
842
+ info="Specify the directory where agent history should be saved.",
843
+ interactive=True,
844
+ )
845
+
846
+ with gr.TabItem("๐Ÿค– Run Agent", id=4):
847
+ task = gr.Textbox(
848
+ label="Task Description",
849
+ lines=4,
850
+ placeholder="Enter your task here...",
851
+ value=config['task'],
852
+ info="Describe what you want the agent to do",
853
+ )
854
+ add_infos = gr.Textbox(
855
+ label="Additional Information",
856
+ lines=3,
857
+ placeholder="Add any helpful context or instructions...",
858
+ info="Optional hints to help the LLM complete the task",
859
+ )
860
+
861
+ with gr.Row():
862
+ run_button = gr.Button("โ–ถ๏ธ Run Agent", variant="primary", scale=2)
863
+ stop_button = gr.Button("โน๏ธ Stop", variant="stop", scale=1)
864
+
865
+ with gr.Row():
866
+ browser_view = gr.HTML(
867
+ value="<h1 style='width:80vw; height:50vh'>Waiting for browser session...</h1>",
868
+ label="Live Browser View",
869
+ )
870
+
871
+
872
+
873
+ with gr.TabItem("๐Ÿง Deep Research", id=5):
874
+ with gr.Row():
875
+ research_task_input = gr.Textbox(
876
+ label="Research Task",
877
+ lines=5,
878
+ value=get_random_prompt # Function call on page load
879
+ )
880
+ refresh_prompt_btn = gr.Button("๐Ÿ”„ New Example", scale=0.15)
881
+ with gr.Row():
882
+ max_search_iteration_input = gr.Number(label="Max Search Iteration", value=3, precision=0) # precision=0 Ensure it is an integer
883
+ max_query_per_iter_input = gr.Number(label="Max Query per Iteration", value=1, precision=0) # precision=0 Ensure it is an integer
884
+ with gr.Row():
885
+ research_button = gr.Button("โ–ถ๏ธ Run Deep Research", variant="primary", scale=2)
886
+ stop_research_button = gr.Button("โน๏ธ Stop", variant="stop", scale=1)
887
+ markdown_output_display = gr.Markdown(label="Research Report")
888
+ markdown_download = gr.File(label="Download Research Report")
889
+ # Connect the refresh button to get a new random prompt
890
+ refresh_prompt_btn.click(fn=get_random_prompt, outputs=research_task_input)
891
+
892
+ with gr.TabItem("๐Ÿ“Š Results", id=6):
893
+ with gr.Group():
894
+
895
+ recording_display = gr.Video(label="Latest Recording")
896
+
897
+ gr.Markdown("### Results")
898
+ with gr.Row():
899
+ with gr.Column():
900
+ final_result_output = gr.Textbox(
901
+ label="Final Result", lines=3, show_label=True
902
+ )
903
+ with gr.Column():
904
+ errors_output = gr.Textbox(
905
+ label="Errors", lines=3, show_label=True
906
+ )
907
+ with gr.Row():
908
+ with gr.Column():
909
+ model_actions_output = gr.Textbox(
910
+ label="Model Actions", lines=3, show_label=True
911
+ )
912
+ with gr.Column():
913
+ model_thoughts_output = gr.Textbox(
914
+ label="Model Thoughts", lines=3, show_label=True
915
+ )
916
+
917
+ trace_file = gr.File(label="Trace File")
918
+
919
+ agent_history_file = gr.File(label="Agent History")
920
+
921
+ # Bind the stop button click event after errors_output is defined
922
+ stop_button.click(
923
+ fn=stop_agent,
924
+ inputs=[],
925
+ outputs=[errors_output, stop_button, run_button],
926
+ )
927
+
928
+ # Run button click handler
929
+ run_button.click(
930
+ fn=run_with_stream,
931
+ inputs=[
932
+ agent_type, llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key,
933
+ use_own_browser, keep_browser_open, headless, disable_security, window_w, window_h,
934
+ save_recording_path, save_agent_history_path, save_trace_path, # Include the new path
935
+ enable_recording, task, add_infos, max_steps, use_vision, max_actions_per_step, tool_calling_method
936
+ ],
937
+ outputs=[
938
+ browser_view, # Browser view
939
+ final_result_output, # Final result
940
+ errors_output, # Errors
941
+ model_actions_output, # Model actions
942
+ model_thoughts_output, # Model thoughts
943
+ recording_display, # Latest recording
944
+ trace_file, # Trace file
945
+ agent_history_file, # Agent history file
946
+ stop_button, # Stop button
947
+ run_button # Run button
948
+ ],
949
+ )
950
+
+                 # Run Deep Research
+                 research_button.click(
+                     fn=run_deep_search,
+                     inputs=[research_task_input, max_search_iteration_input, max_query_per_iter_input, llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key, use_vision, use_own_browser, headless],
+                     outputs=[markdown_output_display, markdown_download, stop_research_button, research_button]
+                 )
+                 # Stop button for the deep-research task
+                 stop_research_button.click(
+                     fn=stop_research_agent,
+                     inputs=[],
+                     outputs=[stop_research_button, research_button],
+                 )
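+                 # Both research handlers also output the two buttons, toggling
+                 # them so only one research action can run at a time.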
+
+             with gr.TabItem("🎥 Recordings", id=7):
+                 def list_recordings(save_recording_path):
+                     if not os.path.exists(save_recording_path):
+                         return []
+
+                     # Get all video files (.mp4 and .webm, any letter case)
+                     recordings = glob.glob(os.path.join(save_recording_path, "*.[mM][pP]4")) + glob.glob(os.path.join(save_recording_path, "*.[wW][eE][bB][mM]"))
+
+                     # Sort recordings by creation time (oldest first)
+                     recordings.sort(key=os.path.getctime)
+
+                     # Add numbering to the recordings
+                     numbered_recordings = []
+                     for idx, recording in enumerate(recordings, start=1):
+                         filename = os.path.basename(recording)
+                         numbered_recordings.append((recording, f"{idx}. {filename}"))
+
+                     return numbered_recordings
+
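+                 # gr.Gallery accepts (file, caption) pairs, so each recording
+                 # is displayed with its numbered filename as the caption.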
+                 recordings_gallery = gr.Gallery(
+                     label="Recordings",
+                     value=list_recordings(config['save_recording_path']),
+                     columns=3,
+                     height="auto",
+                     object_fit="contain"
+                 )
+
+                 refresh_button = gr.Button("🔄 Refresh Recordings", variant="secondary")
+                 refresh_button.click(
+                     fn=list_recordings,
+                     inputs=save_recording_path,
+                     outputs=recordings_gallery
+                 )
+
+ with gr.TabItem("๐Ÿ“ Configuration", id=8):
999
+ with gr.Group():
1000
+ config_file_input = gr.File(
1001
+ label="Load Config File",
1002
+ file_types=[".pkl"],
1003
+ interactive=True
1004
+ )
1005
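+                     # Configs are stored as .pkl (pickle) files; load only files
+                     # you created yourself, since unpickling can execute code.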
+
+                     load_config_button = gr.Button("Load Existing Config From File", variant="primary")
+                     save_config_button = gr.Button("Save Current Config", variant="primary")
+
+                     config_status = gr.Textbox(
+                         label="Status",
+                         lines=2,
+                         interactive=False
+                     )
+
+                 load_config_button.click(
+                     fn=update_ui_from_config,
+                     inputs=[config_file_input],
+                     outputs=[
+                         agent_type, max_steps, max_actions_per_step, use_vision, tool_calling_method,
+                         llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key,
+                         use_own_browser, keep_browser_open, headless, disable_security, enable_recording,
+                         window_w, window_h, save_recording_path, save_trace_path, save_agent_history_path,
+                         task, config_status
+                     ]
+                 )
+
+                 save_config_button.click(
+                     fn=save_current_config,
+                     inputs=[
+                         agent_type, max_steps, max_actions_per_step, use_vision, tool_calling_method,
+                         llm_provider, llm_model_name, llm_temperature, llm_base_url, llm_api_key,
+                         use_own_browser, keep_browser_open, headless, disable_security,
+                         enable_recording, window_w, window_h, save_recording_path, save_trace_path,
+                         save_agent_history_path, task,
+                     ],
+                     outputs=[config_status]
+                 )
+
+         # Refresh the model dropdown whenever the LLM provider changes
+         llm_provider.change(
+             lambda provider, api_key, base_url: update_model_dropdown(provider, api_key, base_url),
+             inputs=[llm_provider, llm_api_key, llm_base_url],
+             outputs=llm_model_name
+         )
+
+         # Only allow editing the recording path while recording is enabled
+         enable_recording.change(
+             lambda enabled: gr.update(interactive=enabled),
+             inputs=enable_recording,
+             outputs=save_recording_path
+         )
+
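+         # Toggling either browser setting closes the shared browser instance,
+         # so the next run starts from a clean session.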
+         use_own_browser.change(fn=close_global_browser)
+         keep_browser_open.change(fn=close_global_browser)
+
+     return demo
+
+ def main():
+     parser = argparse.ArgumentParser(description="Gradio UI for Browser Agent")
+     parser.add_argument("--ip", type=str, default="127.0.0.1", help="IP address to bind to")
+     parser.add_argument("--port", type=int, default=7788, help="Port to listen on")
+     parser.add_argument("--theme", type=str, default="Ocean", choices=list(theme_map.keys()), help="Theme to use for the UI")
+     parser.add_argument("--dark-mode", action="store_true", help="Enable dark mode")  # parsed but not yet applied below
+     args = parser.parse_args()
+
+     config_dict = default_config()
+
+     demo = create_ui(config_dict, theme_name=args.theme)
+     # share=True publishes a temporary public Gradio link in addition to the local address
+     demo.launch(server_name=args.ip, server_port=args.port, pwa=True, share=True)
+
+
+ if __name__ == '__main__':
+     main()
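+
+ # Usage sketch (assuming this diff's file is saved as webui.py):
+ #   python webui.py --ip 0.0.0.0 --port 7788 --theme Ocean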