Priyansh Saxena commited on
Commit
14cc93b
·
2 Parent(s): 416c5bdd44409c

Resolved conflicts by keeping HuggingFace versions

Browse files
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore CHANGED
@@ -1,11 +1,38 @@
 
1
  __pycache__/
 
 
2
  *.pyc
3
  *.pyo
4
  *.pyd
 
 
5
  .Python
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  env/
7
  venv/
8
  .venv/
 
 
 
 
 
9
  pip-log.txt
10
  pip-delete-this-directory.txt
11
  .tox/
@@ -15,15 +42,41 @@ pip-delete-this-directory.txt
15
  nosetests.xml
16
  coverage.xml
17
  *.cover
18
- *.log
19
- .git
20
- .mypy_cache
21
  .pytest_cache
22
  .hypothesis
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  .DS_Store
 
 
 
25
  .env
26
  .flaskenv
27
  *.env
28
 
 
29
  gradio_queue.db
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
  __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
  *.pyc
6
  *.pyo
7
  *.pyd
8
+
9
+ # Distribution / packaging
10
  .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ *.egg-info/
24
+ .installed.cfg
25
+ *.egg
26
+
27
+ # Virtual environments
28
  env/
29
  venv/
30
  .venv/
31
+ ENV/
32
+ env.bak/
33
+ venv.bak/
34
+
35
+ # Testing / coverage
36
  pip-log.txt
37
  pip-delete-this-directory.txt
38
  .tox/
 
42
  nosetests.xml
43
  coverage.xml
44
  *.cover
 
 
 
45
  .pytest_cache
46
  .hypothesis
47
 
48
+ # Logs
49
+ *.log
50
+
51
+ # mypy
52
+ .mypy_cache
53
+ .git
54
+
55
+ # IDE
56
+ .vscode/
57
+ .idea/
58
+ *.swp
59
+ *.swo
60
+ *~
61
+
62
+ # OS
63
  .DS_Store
64
+ Thumbs.db
65
+
66
+ # Environment files
67
  .env
68
  .flaskenv
69
  *.env
70
 
71
+ # Application specific
72
  gradio_queue.db
73
+ user_sessions/
74
+ temp_data/
75
+ cache/
76
+
77
+ # Ollama models cache (large files)
78
+ .ollama/
79
+
80
+ # Local configuration
81
+ config.local.*
82
+ .env.local
Dockerfile CHANGED
@@ -1,37 +1,108 @@
1
- # Use Python 3.11 slim image for HuggingFace Spaces
2
  FROM python:3.11-slim
3
 
 
 
 
 
 
 
 
 
 
4
  # Set working directory
5
  WORKDIR /app
6
 
7
- # Install system dependencies
8
  RUN apt-get update && apt-get install -y \
9
  curl \
 
 
 
10
  && rm -rf /var/lib/apt/lists/*
11
 
12
- # Copy requirements first for better caching
13
- COPY requirements.txt .
14
 
15
- # Install Python dependencies
16
- RUN pip install --no-cache-dir --upgrade pip && \
17
- pip install --no-cache-dir -r requirements.txt
18
 
19
  # Copy application code
20
  COPY . .
21
 
22
- # Create necessary directories
23
- RUN mkdir -p logs cache
 
 
24
 
25
- # Set environment variables for HuggingFace Spaces
26
- ENV PYTHONPATH=/app
27
- ENV PYTHONUNBUFFERED=1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
- # Expose port 7860 (HuggingFace Spaces default)
30
- EXPOSE 7860
31
 
32
- # Health check
33
- HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
34
  CMD curl -f http://localhost:7860/health || exit 1
35
 
36
- # Run the application
37
- CMD ["python", "app_fastapi.py"]
 
1
+ # Multi-stage Dockerfile for HuggingFace Spaces with Ollama
2
  FROM python:3.11-slim
3
 
4
+ # Set environment variables
5
+ ENV PYTHONUNBUFFERED=1
6
+ ENV DEBIAN_FRONTEND=noninteractive
7
+ ENV OLLAMA_HOST=0.0.0.0
8
+ ENV OLLAMA_PORT=11434
9
+ ENV OLLAMA_HOME=/app/.ollama
10
+ ENV HOME=/app
11
+ ENV PYTHONPATH=/app
12
+
13
  # Set working directory
14
  WORKDIR /app
15
 
16
+ # Install system dependencies including Ollama requirements
17
  RUN apt-get update && apt-get install -y \
18
  curl \
19
+ wget \
20
+ build-essential \
21
+ git \
22
  && rm -rf /var/lib/apt/lists/*
23
 
24
+ # Install Ollama
25
+ RUN curl -fsSL https://ollama.ai/install.sh | sh
26
 
27
+ # Copy requirements first for better Docker caching
28
+ COPY requirements.txt .
29
+ RUN pip install --no-cache-dir -r requirements.txt
30
 
31
  # Copy application code
32
  COPY . .
33
 
34
+ # Create necessary directories including Ollama data directory
35
+ RUN mkdir -p logs cache templates static $OLLAMA_HOME \
36
+ && chown -R 1000:1000 /app \
37
+ && chmod -R 755 /app
38
 
39
+ # Expose ports for both app and Ollama
40
+ EXPOSE 7860 11434
41
+
42
+ # Create startup script
43
+ RUN echo '#!/bin/bash\n\
44
+ set -e\n\
45
+ echo "🚀 Starting HuggingFace Spaces Web3 Research Co-Pilot..."\n\
46
+ \n\
47
+ # Create Ollama data directory with proper permissions\n\
48
+ echo "🗂️ Setting up Ollama data directory..."\n\
49
+ mkdir -p /app/.ollama\n\
50
+ chmod -R 755 /app/.ollama\n\
51
+ chown -R $(whoami):$(whoami) /app/.ollama 2>/dev/null || true\n\
52
+ echo "Directory created: $(ls -la /app/.ollama)"\n\
53
+ \n\
54
+ # Start Ollama server with explicit home directory\n\
55
+ echo "📦 Starting Ollama server with data directory /app/.ollama..."\n\
56
+ export HOME=/app\n\
57
+ export OLLAMA_HOME=/app/.ollama\n\
58
+ cd /app\n\
59
+ ollama serve &\n\
60
+ OLLAMA_PID=$!\n\
61
+ \n\
62
+ # Wait for Ollama to be ready\n\
63
+ echo "⏳ Waiting for Ollama to be ready..."\n\
64
+ while ! curl -s http://localhost:11434/api/tags > /dev/null; do\n\
65
+ sleep 2\n\
66
+ echo " ... still waiting for Ollama"\n\
67
+ done\n\
68
+ \n\
69
+ echo "✅ Ollama server is ready!"\n\
70
+ \n\
71
+ # Pull the Llama 3.1 8B model\n\
72
+ echo "📥 Pulling llama3.1:8b model (this may take a few minutes)..."\n\
73
+ export HOME=/app\n\
74
+ export OLLAMA_HOME=/app/.ollama\n\
75
+ cd /app\n\
76
+ ollama pull llama3.1:8b\n\
77
+ echo "✅ Model llama3.1:8b ready!"\n\
78
+ \n\
79
+ # Start the main application\n\
80
+ echo "🌐 Starting Web3 Research Co-Pilot web application..."\n\
81
+ echo "🔍 Running startup validation..."\n\
82
+ python validate_startup.py || exit 1\n\
83
+ python app.py &\n\
84
+ APP_PID=$!\n\
85
+ \n\
86
+ # Function to handle shutdown\n\
87
+ cleanup() {\n\
88
+ echo "🛑 Shutting down gracefully..."\n\
89
+ kill $APP_PID $OLLAMA_PID 2>/dev/null || true\n\
90
+ wait $APP_PID $OLLAMA_PID 2>/dev/null || true\n\
91
+ echo "✅ Shutdown complete"\n\
92
+ }\n\
93
+ \n\
94
+ # Set up signal handlers\n\
95
+ trap cleanup SIGTERM SIGINT\n\
96
+ \n\
97
+ # Wait for processes\n\
98
+ wait $APP_PID $OLLAMA_PID' > start.sh
99
 
100
+ # Make startup script executable
101
+ RUN chmod +x start.sh
102
 
103
+ # Health check with longer startup time for model download
104
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=600s --retries=3 \
105
  CMD curl -f http://localhost:7860/health || exit 1
106
 
107
+ # Start command
108
+ CMD ["./start.sh"]
README.md CHANGED
@@ -1,129 +1,119 @@
1
- # 🚀 Web3 Research Co-Pilot
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
- AI-powered cryptocurrency research assistant with comprehensive Web3 data analysis capabilities.
4
 
5
- ## Features
6
 
7
- - **LangChain AI Agent**: Advanced query processing with Google Gemini
8
- - **Real-time Data**: CoinGecko, DeFiLlama, Etherscan integration
9
- - **Interactive UI**: Gradio-based chat interface with visualizations
10
- - **AIRAA Integration**: Research data forwarding to external platforms
11
- - **Production Ready**: Comprehensive error handling and async architecture
12
 
13
- ## Quick Start
 
 
 
 
 
14
 
15
- ### 1. Environment Setup
16
 
17
- ```bash
18
- export GEMINI_API_KEY="your_gemini_api_key"
19
- export ETHERSCAN_API_KEY="your_etherscan_key" # Optional
20
- export COINGECKO_API_KEY="your_coingecko_key" # Optional
21
- ```
 
22
 
23
- ### 2. Installation
24
 
25
- ```bash
26
- pip install -r requirements.txt
27
- ```
 
 
28
 
29
- ### 3. Launch
30
 
 
31
  ```bash
32
- python launch.py
33
- ```
34
-
35
- ## API Keys
36
 
37
- - **GEMINI_API_KEY** (Required): [Get from Google AI Studio](https://makersuite.google.com/app/apikey)
38
- - **ETHERSCAN_API_KEY** (Optional): [Get from Etherscan.io](https://etherscan.io/apis)
39
- - **COINGECKO_API_KEY** (Optional): [Get from CoinGecko](https://www.coingecko.com/en/api/pricing)
40
 
41
- ## Architecture
 
 
42
 
 
 
43
  ```
44
- ├── app.py # Main Gradio application
45
- ├── src/
46
- │ ├── agent/ # LangChain AI agent
47
- │ ├── tools/ # Web3 data tools
48
- │ ├── api/ # External integrations
49
- │ └── utils/ # Configuration & utilities
50
- └── launch.py # Launch script
51
- ```
52
-
53
- ## Usage Examples
54
-
55
- - "What is the current price of Bitcoin?"
56
- - "Analyze Ethereum's DeFi ecosystem"
57
- - "Show me gas prices and network stats"
58
- - "Research the top DeFi protocols by TVL"
59
-
60
- ## Deployment
61
-
62
- Configured for HuggingFace Spaces with automatic dependency management.
63
 
64
- ---
65
-
66
- **Built with minimal, expert-level code and production-grade error handling.**
67
-
68
- ## Features
69
-
70
- - **Real-time Market Analysis**: CoinGecko, DeFiLlama, Etherscan integration
71
- - **AI Research Agent**: Powered by Google Gemini
72
- - **Interactive Interface**: Modern Gradio UI
73
- - **Data Visualization**: Price charts and market overviews
74
- - **AIRAA Integration**: Webhook support for external platforms
75
-
76
- ## Quick Start
77
-
78
- 1. **Clone and Setup**
79
  ```bash
80
- git clone <repository-url>
81
- cd web3-research-agent
82
- pip install -r requirements.txt
83
  ```
84
 
85
- 2. **Environment Configuration**
86
- ```bash
87
- cp .env.example .env
88
- # Edit .env with your API keys
89
- ```
90
 
91
- 3. **Run Application**
92
- ```bash
93
- python app.py
 
 
 
 
 
 
 
94
  ```
95
 
96
- ## Required API Keys
97
 
98
- - `GEMINI_API_KEY`: Google Gemini AI (required)
99
- - `ETHERSCAN_API_KEY`: Ethereum blockchain data
100
- - `COINGECKO_API_KEY`: Cryptocurrency market data (optional)
101
- - `AIRAA_WEBHOOK_URL`: External integration (optional)
 
102
 
103
- ## Deployment
104
 
105
- ### Docker
106
- ```bash
107
- docker build -t web3-research-agent .
108
- docker run -p 7860:7860 --env-file .env web3-research-agent
109
- ```
110
 
111
- ### Hugging Face Spaces
112
- Upload repository to HF Spaces with environment variables configured.
113
 
114
- ## Architecture
115
 
116
- - **Agent**: LangChain-based research agent with memory
117
- - **Tools**: Modular API integrations (CoinGecko, DeFiLlama, Etherscan)
118
- - **UI**: Gradio interface with chat and visualization
119
- - **Cache**: Optimized caching for API responses
120
- - **Integration**: AIRAA webhook support
121
 
122
- ## Usage Examples
123
 
124
- - "Bitcoin price analysis and market sentiment"
125
- - "Top DeFi protocols by TVL"
126
- - "Ethereum gas prices and network stats"
127
- - "Compare BTC vs ETH performance"
128
 
129
- Built with ❤️ for Web3 research
 
1
+ ---
2
+ title: Web3 Research Co-Pilot
3
+ emoji: 🚀
4
+ colorFrom: blue
5
+ colorTo: green
6
+ sdk: docker
7
+ app_file: app.py
8
+ dockerfile: Dockerfile
9
+ license: mit
10
+ tags:
11
+ - cryptocurrency
12
+ - blockchain
13
+ - defi
14
+ - ai-research
15
+ - ollama
16
+ - llama3
17
+ pinned: false
18
+ header: default
19
+ short_description: AI-powered crypto research with real-time blockchain data
20
+ suggested_hardware: t4-medium
21
+ ---
22
 
23
+ # Web3 Research Co-Pilot 🚀
24
 
25
+ An AI-powered cryptocurrency research assistant that provides real-time blockchain analytics, DeFi insights, and market intelligence using Llama 8B and comprehensive API integrations.
26
 
27
+ ## Features
 
 
 
 
28
 
29
+ - **🤖 AI-Powered Analysis**: Uses Llama 8B model via Ollama for intelligent responses
30
+ - **🔗 Real-Time Data**: Integrates with CryptoCompare, DeFiLlama, Etherscan APIs
31
+ - **🛡️ AI Safety**: Built-in content filtering and safety guardrails
32
+ - **📊 Interactive UI**: Modern web interface with dark/light themes
33
+ - **⚡ Streaming Responses**: Real-time progress updates during analysis
34
+ - **🔄 Comprehensive Tools**: 5+ specialized cryptocurrency research tools
35
 
36
+ ## 🛠️ Technical Stack
37
 
38
+ - **Backend**: FastAPI with Python 3.11
39
+ - **AI Model**: Llama 3 8B via Ollama (local inference)
40
+ - **Frontend**: Vanilla JavaScript with modern CSS
41
+ - **APIs**: CryptoCompare, DeFiLlama, Etherscan, CoinGecko
42
+ - **Safety**: Custom AI safety module with content filtering
43
+ - **Deployment**: Docker for HuggingFace Spaces
44
 
45
+ ## 🚀 Usage
46
 
47
+ Ask questions like:
48
+ - "Analyze Bitcoin price trends and institutional adoption patterns"
49
+ - "Compare top DeFi protocols by TVL and yield metrics"
50
+ - "What are the current Ethereum gas fees?"
51
+ - "Track whale movements in Bitcoin today"
52
 
53
+ ## 🔧 Development
54
 
55
+ ### Local Setup
56
  ```bash
57
+ # Clone the repository
58
+ git clone https://huggingface.co/spaces/your-username/web3-research-copilot
59
+ cd web3-research-copilot
 
60
 
61
+ # Install dependencies
62
+ pip install -r requirements.txt
 
63
 
64
+ # Start Ollama (in separate terminal)
65
+ ollama serve
66
+ ollama pull llama3:8b
67
 
68
+ # Run the application
69
+ python app.py
70
  ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
 
72
+ ### Docker Deployment
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
  ```bash
74
+ # Build and run with Docker
75
+ docker build -f Dockerfile.hf -t web3-copilot .
76
+ docker run -p 7860:7860 -p 11434:11434 web3-copilot
77
  ```
78
 
79
+ ## 📁 Project Structure
 
 
 
 
80
 
81
+ ```
82
+ ├── app.py # Main FastAPI application
83
+ ├── templates/ # HTML templates
84
+ ├── static/ # CSS and JavaScript files
85
+ ├── src/
86
+ │ ├── agent/ # AI research agent
87
+ │ ├── tools/ # API integration tools
88
+ │ └── utils/ # Configuration and safety
89
+ ├── Dockerfile.hf # HuggingFace Spaces Docker config
90
+ └── requirements.txt # Python dependencies
91
  ```
92
 
93
+ ## 🛡️ AI Safety Features
94
 
95
+ - Input sanitization and validation
96
+ - Rate limiting protection
97
+ - Content filtering for harmful requests
98
+ - Response safety validation
99
+ - Comprehensive logging for monitoring
100
 
101
+ ## 📊 Supported APIs
102
 
103
+ - **CryptoCompare**: Price data and market statistics
104
+ - **DeFiLlama**: Protocol TVL and DeFi analytics
105
+ - **Etherscan**: Ethereum network data and gas prices
106
+ - **CoinGecko**: Cryptocurrency market data
107
+ - **Custom Chart Data**: Historical price analysis
108
 
109
+ ## 🤝 Contributing
 
110
 
111
+ This project implements responsible AI practices and focuses on legitimate cryptocurrency research and education.
112
 
113
+ ## 📄 License
 
 
 
 
114
 
115
+ MIT License - see LICENSE file for details
116
 
117
+ ---
 
 
 
118
 
119
+ Built with ❤️ for the crypto research community
app.py CHANGED
@@ -1,11 +1,12 @@
1
  from fastapi import FastAPI, HTTPException, Request
2
  from fastapi.staticfiles import StaticFiles
3
  from fastapi.templating import Jinja2Templates
4
- from fastapi.responses import HTMLResponse, JSONResponse
5
  from pydantic import BaseModel
6
  import asyncio
7
  import json
8
  from datetime import datetime
 
9
  from typing import List, Dict, Any, Optional
10
  import os
11
  from dotenv import load_dotenv
@@ -28,10 +29,15 @@ app = FastAPI(
28
  version="2.0.0"
29
  )
30
 
 
 
 
 
31
  # Pydantic models
32
  class QueryRequest(BaseModel):
33
  query: str
34
  chat_history: Optional[List[Dict[str, str]]] = []
 
35
 
36
  class QueryResponse(BaseModel):
37
  success: bool
@@ -44,44 +50,57 @@ class QueryResponse(BaseModel):
44
  class Web3CoPilotService:
45
  def __init__(self):
46
  try:
47
- logger.info("Initializing Web3 Research Co-Pilot...")
48
 
49
- if config.GEMINI_API_KEY:
50
- logger.info("Initializing AI research agent...")
 
51
  self.agent = Web3ResearchAgent()
52
- logger.info("AI research agent initialized")
53
  else:
54
- logger.warning("GEMINI_API_KEY not configured - limited functionality")
55
  self.agent = None
 
56
 
57
- logger.info("Initializing integrations...")
58
- self.airaa = AIRAAIntegration()
59
-
60
- self.enabled = bool(config.GEMINI_API_KEY)
61
- self.visualizer = CryptoVisualizations()
 
 
62
 
63
- logger.info(f"Service initialized (AI enabled: {self.enabled})")
 
 
 
 
 
 
 
64
 
65
  except Exception as e:
66
- logger.error(f"Service initialization failed: {e}")
 
67
  self.agent = None
68
  self.airaa = None
69
- self.enabled = False
70
- self.visualizer = CryptoVisualizations()
71
 
72
- async def process_query(self, query: str) -> QueryResponse:
73
- """Process research query with visualizations"""
74
- logger.info(f"Processing query: {query[:100]}...")
75
 
76
  if not query.strip():
 
77
  return QueryResponse(
78
- success=False,
79
  response="Please provide a research query.",
80
  error="Empty query"
81
  )
82
-
83
  try:
84
  if not self.enabled:
 
85
  response = """**Research Assistant - Limited Mode**
86
 
87
  API access available for basic cryptocurrency data:
@@ -92,43 +111,61 @@ API access available for basic cryptocurrency data:
92
  Configure GEMINI_API_KEY environment variable for full AI analysis."""
93
  return QueryResponse(success=True, response=response, sources=["System"])
94
 
95
- logger.info("Processing with AI research agent...")
96
- result = await self.agent.research_query(query)
 
 
 
97
 
98
  if result.get("success"):
99
  response = result.get("result", "No analysis generated")
100
  sources = result.get("sources", [])
101
  metadata = result.get("metadata", {})
102
 
103
- # Generate visualizations if relevant data is available
 
 
104
  visualizations = []
 
 
 
 
 
 
 
 
 
 
 
105
  if metadata:
 
106
  vis_html = await self._generate_visualizations(metadata, query)
107
  if vis_html:
108
  visualizations.append(vis_html)
 
109
 
110
  # Send to AIRAA if enabled
111
  if self.airaa and self.airaa.enabled:
112
  try:
113
  await self.airaa.send_research_data(query, response)
114
- logger.info("Data sent to AIRAA")
115
  except Exception as e:
116
- logger.warning(f"AIRAA integration failed: {e}")
117
 
118
  return QueryResponse(
119
  success=True,
120
- response=response,
121
  sources=sources,
122
  metadata=metadata,
123
  visualizations=visualizations
124
  )
125
  else:
126
  error_msg = result.get("error", "Research analysis failed")
127
- logger.error(f"Research failed: {error_msg}")
128
  return QueryResponse(success=False, response=error_msg, error=error_msg)
129
 
130
  except Exception as e:
131
- logger.error(f"Query processing error: {e}")
132
  error_msg = f"Processing error: {str(e)}"
133
  return QueryResponse(success=False, response=error_msg, error=error_msg)
134
 
@@ -165,616 +202,197 @@ Configure GEMINI_API_KEY environment variable for full AI analysis."""
165
  if symbol in query_upper:
166
  return symbol
167
  return 'BTC' # Default
168
-
169
- # Initialize service
170
- service = Web3CoPilotService()
171
-
172
- @app.get("/", response_class=HTMLResponse)
173
- async def get_homepage(request: Request):
174
- """Serve minimalist, professional interface"""
175
- html_content = """
176
- <!DOCTYPE html>
177
- <html lang="en">
178
- <head>
179
- <meta charset="UTF-8">
180
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
181
- <title>Web3 Research Co-Pilot</title>
182
- <link rel="icon" href="data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 24 24%22><path fill=%22%2300d4aa%22 d=%22M12 2L2 7v10c0 5.5 3.8 7.7 9 9 5.2-1.3 9-3.5 9-9V7l-10-5z%22/></svg>">
183
-
184
- <style>
185
- :root {
186
- --primary: #0066ff;
187
- --primary-dark: #0052cc;
188
- --accent: #00d4aa;
189
- --background: #000000;
190
- --surface: #111111;
191
- --surface-elevated: #1a1a1a;
192
- --text: #ffffff;
193
- --text-secondary: #a0a0a0;
194
- --text-muted: #666666;
195
- --border: rgba(255, 255, 255, 0.08);
196
- --border-focus: rgba(0, 102, 255, 0.3);
197
- --shadow: rgba(0, 0, 0, 0.4);
198
- --success: #00d4aa;
199
- --warning: #ffa726;
200
- --error: #f44336;
201
- }
202
-
203
- * {
204
- margin: 0;
205
- padding: 0;
206
- box-sizing: border-box;
207
- }
208
-
209
- body {
210
- font-family: -apple-system, BlinkMacSystemFont, 'SF Pro Display', system-ui, sans-serif;
211
- background: var(--background);
212
- color: var(--text);
213
- line-height: 1.5;
214
- min-height: 100vh;
215
- font-weight: 400;
216
- -webkit-font-smoothing: antialiased;
217
- -moz-osx-font-smoothing: grayscale;
218
- }
219
-
220
- .container {
221
- max-width: 1000px;
222
- margin: 0 auto;
223
- padding: 2rem 1.5rem;
224
- }
225
-
226
- .header {
227
- text-align: center;
228
- margin-bottom: 2.5rem;
229
- }
230
-
231
- .header h1 {
232
- font-size: 2.25rem;
233
- font-weight: 600;
234
- color: var(--text);
235
- margin-bottom: 0.5rem;
236
- letter-spacing: -0.025em;
237
- }
238
-
239
- .header .brand {
240
- color: var(--primary);
241
- }
242
-
243
- .header p {
244
- color: var(--text-secondary);
245
- font-size: 1rem;
246
- font-weight: 400;
247
- }
248
-
249
- .status {
250
- background: var(--surface);
251
- border: 1px solid var(--border);
252
- border-radius: 12px;
253
- padding: 1rem 1.5rem;
254
- margin-bottom: 2rem;
255
- text-align: center;
256
- transition: all 0.2s ease;
257
- }
258
-
259
- .status.online {
260
- border-color: var(--success);
261
- background: linear-gradient(135deg, rgba(0, 212, 170, 0.05), rgba(0, 212, 170, 0.02));
262
- }
263
-
264
- .status.offline {
265
- border-color: var(--error);
266
- background: linear-gradient(135deg, rgba(244, 67, 54, 0.05), rgba(244, 67, 54, 0.02));
267
- }
268
-
269
- .status.checking {
270
- border-color: var(--warning);
271
- background: linear-gradient(135deg, rgba(255, 167, 38, 0.05), rgba(255, 167, 38, 0.02));
272
- animation: pulse 2s infinite;
273
- }
274
-
275
- @keyframes pulse {
276
- 0%, 100% { opacity: 1; }
277
- 50% { opacity: 0.8; }
278
- }
279
-
280
- .chat-interface {
281
- background: var(--surface);
282
- border: 1px solid var(--border);
283
- border-radius: 16px;
284
- overflow: hidden;
285
- margin-bottom: 2rem;
286
- backdrop-filter: blur(20px);
287
- }
288
-
289
- .chat-messages {
290
- height: 480px;
291
- overflow-y: auto;
292
- padding: 2rem;
293
- background: linear-gradient(180deg, var(--background), var(--surface));
294
- }
295
-
296
- .chat-messages::-webkit-scrollbar {
297
- width: 3px;
298
- }
299
-
300
- .chat-messages::-webkit-scrollbar-track {
301
- background: transparent;
302
- }
303
-
304
- .chat-messages::-webkit-scrollbar-thumb {
305
- background: var(--border);
306
- border-radius: 2px;
307
- }
308
-
309
- .message {
310
- margin-bottom: 2rem;
311
- opacity: 0;
312
- animation: messageSlide 0.4s cubic-bezier(0.2, 0, 0.2, 1) forwards;
313
- }
314
-
315
- @keyframes messageSlide {
316
- from {
317
- opacity: 0;
318
- transform: translateY(20px) scale(0.98);
319
- }
320
- to {
321
- opacity: 1;
322
- transform: translateY(0) scale(1);
323
- }
324
- }
325
-
326
- .message.user {
327
- text-align: right;
328
- }
329
-
330
- .message.assistant {
331
- text-align: left;
332
- }
333
-
334
- .message-content {
335
- display: inline-block;
336
- max-width: 75%;
337
- padding: 1.25rem 1.5rem;
338
- border-radius: 24px;
339
- font-size: 0.95rem;
340
- line-height: 1.6;
341
- position: relative;
342
- }
343
-
344
- .message.user .message-content {
345
- background: linear-gradient(135deg, var(--primary), var(--primary-dark));
346
- color: #ffffff;
347
- border-bottom-right-radius: 8px;
348
- box-shadow: 0 4px 12px rgba(0, 102, 255, 0.2);
349
- }
350
-
351
- .message.assistant .message-content {
352
- background: var(--surface-elevated);
353
- color: var(--text);
354
- border-bottom-left-radius: 8px;
355
- border: 1px solid var(--border);
356
- }
357
-
358
- .message-meta {
359
- font-size: 0.75rem;
360
- color: var(--text-muted);
361
- margin-top: 0.5rem;
362
- font-weight: 500;
363
- }
364
-
365
- .sources {
366
- margin-top: 1rem;
367
- padding-top: 1rem;
368
- border-top: 1px solid var(--border);
369
- font-size: 0.8rem;
370
- color: var(--text-secondary);
371
- }
372
-
373
- .sources span {
374
- display: inline-block;
375
- background: rgba(0, 102, 255, 0.1);
376
- border: 1px solid rgba(0, 102, 255, 0.2);
377
- padding: 0.25rem 0.75rem;
378
- border-radius: 6px;
379
- margin: 0.25rem 0.5rem 0.25rem 0;
380
- font-weight: 500;
381
- font-size: 0.75rem;
382
- }
383
-
384
- .input-area {
385
- padding: 2rem;
386
- background: linear-gradient(180deg, var(--surface), var(--surface-elevated));
387
- border-top: 1px solid var(--border);
388
- }
389
-
390
- .input-container {
391
- display: flex;
392
- gap: 1rem;
393
- align-items: stretch;
394
- }
395
-
396
- .input-field {
397
- flex: 1;
398
- padding: 1rem 1.5rem;
399
- background: var(--background);
400
- border: 2px solid var(--border);
401
- border-radius: 28px;
402
- color: var(--text);
403
- font-size: 0.95rem;
404
- outline: none;
405
- transition: all 0.2s cubic-bezier(0.2, 0, 0.2, 1);
406
- font-weight: 400;
407
- }
408
-
409
- .input-field:focus {
410
- border-color: var(--primary);
411
- box-shadow: 0 0 0 4px var(--border-focus);
412
- background: var(--surface);
413
- }
414
-
415
- .input-field::placeholder {
416
- color: var(--text-muted);
417
- font-weight: 400;
418
- }
419
-
420
- .send-button {
421
- padding: 1rem 2rem;
422
- background: linear-gradient(135deg, var(--primary), var(--primary-dark));
423
- color: #ffffff;
424
- border: none;
425
- border-radius: 28px;
426
- font-weight: 600;
427
- cursor: pointer;
428
- transition: all 0.2s cubic-bezier(0.2, 0, 0.2, 1);
429
- font-size: 0.95rem;
430
- box-shadow: 0 4px 12px rgba(0, 102, 255, 0.2);
431
- }
432
-
433
- .send-button:hover:not(:disabled) {
434
- transform: translateY(-2px);
435
- box-shadow: 0 8px 24px rgba(0, 102, 255, 0.3);
436
- }
437
-
438
- .send-button:active {
439
- transform: translateY(0);
440
- }
441
-
442
- .send-button:disabled {
443
- opacity: 0.6;
444
- cursor: not-allowed;
445
- transform: none;
446
- box-shadow: 0 4px 12px rgba(0, 102, 255, 0.1);
447
- }
448
-
449
- .examples {
450
- display: grid;
451
- grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
452
- gap: 1rem;
453
- margin-top: 1rem;
454
- }
455
-
456
- .example {
457
- background: linear-gradient(135deg, var(--surface), var(--surface-elevated));
458
- border: 1px solid var(--border);
459
- border-radius: 12px;
460
- padding: 1.5rem;
461
- cursor: pointer;
462
- transition: all 0.3s cubic-bezier(0.2, 0, 0.2, 1);
463
- position: relative;
464
- overflow: hidden;
465
- }
466
-
467
- .example::before {
468
- content: '';
469
- position: absolute;
470
- top: 0;
471
- left: -100%;
472
- width: 100%;
473
- height: 100%;
474
- background: linear-gradient(90deg, transparent, rgba(0, 102, 255, 0.05), transparent);
475
- transition: left 0.5s ease;
476
- }
477
-
478
- .example:hover::before {
479
- left: 100%;
480
- }
481
-
482
- .example:hover {
483
- border-color: var(--primary);
484
- transform: translateY(-4px);
485
- box-shadow: 0 12px 32px rgba(0, 0, 0, 0.2);
486
- background: linear-gradient(135deg, var(--surface-elevated), var(--surface));
487
- }
488
-
489
- .example-title {
490
- font-weight: 600;
491
- color: var(--text);
492
- margin-bottom: 0.5rem;
493
- font-size: 0.95rem;
494
- }
495
-
496
- .example-desc {
497
- font-size: 0.85rem;
498
- color: var(--text-secondary);
499
- font-weight: 400;
500
- }
501
-
502
- .loading {
503
- display: inline-flex;
504
- align-items: center;
505
- gap: 0.5rem;
506
- color: var(--text-secondary);
507
- font-weight: 500;
508
- }
509
-
510
- .loading::after {
511
- content: '';
512
- width: 14px;
513
- height: 14px;
514
- border: 2px solid currentColor;
515
- border-top-color: transparent;
516
- border-radius: 50%;
517
- animation: spin 1s linear infinite;
518
- }
519
-
520
- @keyframes spin {
521
- to { transform: rotate(360deg); }
522
- }
523
-
524
- .visualization-container {
525
- margin: 1.5rem 0;
526
- background: var(--surface-elevated);
527
- border-radius: 12px;
528
- padding: 1.5rem;
529
- border: 1px solid var(--border);
530
- }
531
-
532
- .welcome {
533
- text-align: center;
534
- padding: 4rem 2rem;
535
- color: var(--text-secondary);
536
- }
537
-
538
- .welcome h3 {
539
- font-size: 1.25rem;
540
- font-weight: 600;
541
- margin-bottom: 0.5rem;
542
- color: var(--text);
543
- }
544
-
545
- .welcome p {
546
- font-size: 0.95rem;
547
- font-weight: 400;
548
- }
549
-
550
- @media (max-width: 768px) {
551
- .container {
552
- padding: 1rem;
553
- }
554
-
555
- .header h1 {
556
- font-size: 1.75rem;
557
- }
558
-
559
- .chat-messages {
560
- height: 400px;
561
- padding: 1.5rem;
562
- }
563
-
564
- .message-content {
565
- max-width: 85%;
566
- padding: 1rem 1.25rem;
567
- }
568
-
569
- .input-area {
570
- padding: 1.5rem;
571
- }
572
-
573
- .input-container {
574
- flex-direction: column;
575
- gap: 0.75rem;
576
- }
577
-
578
- .send-button {
579
- align-self: stretch;
580
- }
581
-
582
- .examples {
583
- grid-template-columns: 1fr;
584
- }
585
- }
586
- </style>
587
- </head>
588
- <body>
589
- <div class="container">
590
- <div class="header">
591
- <h1><span class="brand">Web3</span> Research Co-Pilot</h1>
592
- <p>Professional cryptocurrency analysis and market intelligence</p>
593
- </div>
594
-
595
- <div id="status" class="status checking">
596
- <span>Initializing research systems...</span>
597
- </div>
598
-
599
- <div class="chat-interface">
600
- <div id="chatMessages" class="chat-messages">
601
- <div class="welcome">
602
- <h3>Welcome to Web3 Research Co-Pilot</h3>
603
- <p>Ask about market trends, DeFi protocols, or blockchain analytics</p>
604
- </div>
605
- </div>
606
- <div class="input-area">
607
- <div class="input-container">
608
- <input
609
- type="text"
610
- id="queryInput"
611
- class="input-field"
612
- placeholder="Research Bitcoin trends, analyze DeFi yields, compare protocols..."
613
- maxlength="500"
614
- >
615
- <button id="sendBtn" class="send-button">Research</button>
616
- </div>
617
- </div>
618
- </div>
619
-
620
- <div class="examples">
621
- <div class="example" onclick="setQuery('Analyze Bitcoin price trends and institutional adoption patterns')">
622
- <div class="example-title">Market Analysis</div>
623
- <div class="example-desc">Bitcoin trends, institutional flows, and market sentiment</div>
624
- </div>
625
- <div class="example" onclick="setQuery('Compare top DeFi protocols by TVL, yield, and risk metrics')">
626
- <div class="example-title">DeFi Intelligence</div>
627
- <div class="example-desc">Protocol comparison, yield analysis, and risk assessment</div>
628
- </div>
629
- <div class="example" onclick="setQuery('Evaluate Ethereum Layer 2 scaling solutions and adoption metrics')">
630
- <div class="example-title">Layer 2 Research</div>
631
- <div class="example-desc">Scaling solutions, transaction costs, and ecosystem growth</div>
632
- </div>
633
- <div class="example" onclick="setQuery('Identify optimal yield farming strategies across multiple chains')">
634
- <div class="example-title">Yield Optimization</div>
635
- <div class="example-desc">Cross-chain opportunities, APY tracking, and risk analysis</div>
636
- </div>
637
- </div>
638
- </div>
639
-
640
- <script>
641
- let chatHistory = [];
642
- let messageCount = 0;
643
-
644
- async function checkStatus() {
645
- try {
646
- const response = await fetch('/status');
647
- const status = await response.json();
648
 
649
- const statusDiv = document.getElementById('status');
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
650
 
651
- if (status.enabled && status.gemini_configured) {
652
- statusDiv.className = 'status online';
653
- statusDiv.innerHTML = `
654
- <span>Research systems online</span>
655
- <div style="margin-top: 0.5rem; font-size: 0.85rem; opacity: 0.8;">
656
- Tools: ${status.tools_available.join(' • ')}
657
- </div>
658
- `;
659
- } else {
660
- statusDiv.className = 'status offline';
661
- statusDiv.innerHTML = `
662
- <span>Limited mode - Configure GEMINI_API_KEY for full functionality</span>
663
- <div style="margin-top: 0.5rem; font-size: 0.85rem; opacity: 0.8;">
664
- Available: ${status.tools_available.join(' • ')}
665
- </div>
666
- `;
667
- }
668
- } catch (error) {
669
- const statusDiv = document.getElementById('status');
670
- statusDiv.className = 'status offline';
671
- statusDiv.innerHTML = '<span>Connection error</span>';
672
- }
673
- }
674
-
675
- async function sendQuery() {
676
- const input = document.getElementById('queryInput');
677
- const sendBtn = document.getElementById('sendBtn');
678
- const query = input.value.trim();
679
-
680
- if (!query) return;
681
-
682
- addMessage('user', query);
683
- input.value = '';
684
-
685
- sendBtn.disabled = true;
686
- sendBtn.innerHTML = '<span class="loading">Processing</span>';
687
-
688
- try {
689
- const response = await fetch('/query', {
690
- method: 'POST',
691
- headers: { 'Content-Type': 'application/json' },
692
- body: JSON.stringify({ query, chat_history: chatHistory })
693
- });
694
-
695
- const result = await response.json();
696
-
697
- if (result.success) {
698
- addMessage('assistant', result.response, result.sources, result.visualizations);
699
- } else {
700
- addMessage('assistant', result.response || 'Analysis failed. Please try again.');
701
- }
702
- } catch (error) {
703
- addMessage('assistant', 'Connection error. Please check your network and try again.');
704
- } finally {
705
- sendBtn.disabled = false;
706
- sendBtn.innerHTML = 'Research';
707
- input.focus();
708
- }
709
- }
710
-
711
- function addMessage(sender, content, sources = [], visualizations = []) {
712
- const messagesDiv = document.getElementById('chatMessages');
713
 
714
- // Clear welcome message
715
- if (messageCount === 0) {
716
- messagesDiv.innerHTML = '';
717
- }
718
- messageCount++;
719
-
720
- const messageDiv = document.createElement('div');
721
- messageDiv.className = `message ${sender}`;
722
-
723
- let sourcesHtml = '';
724
- if (sources && sources.length > 0) {
725
- sourcesHtml = `
726
- <div class="sources">
727
- Sources: ${sources.map(s => `<span>${s}</span>`).join('')}
728
- </div>
729
- `;
730
- }
731
-
732
- let visualizationHtml = '';
733
- if (visualizations && visualizations.length > 0) {
734
- visualizationHtml = visualizations.map(viz =>
735
- `<div class="visualization-container">${viz}</div>`
736
- ).join('');
737
- }
738
-
739
- messageDiv.innerHTML = `
740
- <div class="message-content">
741
- ${content.replace(/\n/g, '<br>')}
742
- ${sourcesHtml}
743
- </div>
744
- ${visualizationHtml}
745
- <div class="message-meta">${new Date().toLocaleTimeString()}</div>
746
- `;
747
-
748
- messagesDiv.appendChild(messageDiv);
749
- messagesDiv.scrollTop = messagesDiv.scrollHeight;
750
-
751
- chatHistory.push({ role: sender, content });
752
- if (chatHistory.length > 20) chatHistory = chatHistory.slice(-20);
753
- }
754
-
755
- function setQuery(query) {
756
- document.getElementById('queryInput').value = query;
757
- setTimeout(() => sendQuery(), 100);
758
- }
759
-
760
- // Event listeners
761
- document.getElementById('queryInput').addEventListener('keypress', (e) => {
762
- if (e.key === 'Enter') sendQuery();
763
- });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
764
 
765
- document.getElementById('sendBtn').addEventListener('click', sendQuery);
 
766
 
767
- // Initialize
768
- document.addEventListener('DOMContentLoaded', () => {
769
- checkStatus();
770
- document.getElementById('queryInput').focus();
771
- });
772
- </script>
773
- </body>
774
- </html>
775
- """
776
- return HTMLResponse(content=html_content)
777
 
 
 
 
 
778
  @app.get("/status")
779
  async def get_status():
780
  """System status endpoint"""
@@ -790,8 +408,147 @@ async def get_status():
790
 
791
  @app.post("/query", response_model=QueryResponse)
792
  async def process_query(request: QueryRequest):
793
- """Process research query"""
794
- return await service.process_query(request.query)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
795
 
796
  @app.get("/health")
797
  async def health_check():
@@ -803,6 +560,54 @@ async def health_check():
803
  "version": "2.0.0"
804
  }
805
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
806
  if __name__ == "__main__":
807
  import uvicorn
808
  logger.info("Starting Web3 Research Co-Pilot...")
 
1
  from fastapi import FastAPI, HTTPException, Request
2
  from fastapi.staticfiles import StaticFiles
3
  from fastapi.templating import Jinja2Templates
4
+ from fastapi.responses import HTMLResponse, JSONResponse, StreamingResponse
5
  from pydantic import BaseModel
6
  import asyncio
7
  import json
8
  from datetime import datetime
9
+ import time
10
  from typing import List, Dict, Any, Optional
11
  import os
12
  from dotenv import load_dotenv
 
29
  version="2.0.0"
30
  )
31
 
32
+ # Mount static files and templates
33
+ app.mount("/static", StaticFiles(directory="static"), name="static")
34
+ templates = Jinja2Templates(directory="templates")
35
+
36
  # Pydantic models
37
  class QueryRequest(BaseModel):
38
  query: str
39
  chat_history: Optional[List[Dict[str, str]]] = []
40
+ use_gemini: bool = False
41
 
42
  class QueryResponse(BaseModel):
43
  success: bool
 
50
  class Web3CoPilotService:
51
  def __init__(self):
52
  try:
53
+ logger.info("Initializing Web3 Research Service...")
54
 
55
+ # Initialize research agent (supports Ollama-only mode)
56
+ if config.USE_OLLAMA_ONLY or config.GEMINI_API_KEY:
57
+ logger.info("AI research capabilities enabled")
58
  self.agent = Web3ResearchAgent()
59
+ self.enabled = self.agent.enabled
60
  else:
61
+ logger.info("AI research capabilities disabled - configuration required")
62
  self.agent = None
63
+ self.enabled = False
64
 
65
+ # Initialize integrations
66
+ logger.info("Initializing external integrations...")
67
+ try:
68
+ self.airaa = AIRAAIntegration()
69
+ except Exception as e:
70
+ logger.warning("External integration unavailable")
71
+ self.airaa = None
72
 
73
+ # Initialize visualization tools
74
+ try:
75
+ self.viz = CryptoVisualizations()
76
+ except Exception as e:
77
+ logger.warning("Visualization tools unavailable")
78
+ self.viz = None
79
+
80
+ logger.info(f"Service initialized successfully (AI enabled: {self.enabled})")
81
 
82
  except Exception as e:
83
+ logger.error(f"Service initialization failed")
84
+ self.enabled = False
85
  self.agent = None
86
  self.airaa = None
87
+ self.viz = None
 
88
 
89
+ async def process_query(self, query: str, use_gemini: bool = False) -> QueryResponse:
90
+ """Process research query with comprehensive analysis"""
91
+ logger.info("Processing research request...")
92
 
93
  if not query.strip():
94
+ logger.warning("Empty query received")
95
  return QueryResponse(
96
+ success=False,
97
  response="Please provide a research query.",
98
  error="Empty query"
99
  )
100
+
101
  try:
102
  if not self.enabled:
103
+ logger.info("Processing in limited mode")
104
  response = """**Research Assistant - Limited Mode**
105
 
106
  API access available for basic cryptocurrency data:
 
111
  Configure GEMINI_API_KEY environment variable for full AI analysis."""
112
  return QueryResponse(success=True, response=response, sources=["System"])
113
 
114
+ logger.info("🤖 Processing with AI research agent...")
115
+ logger.info(f"🛠️ Available tools: {[tool.name for tool in self.agent.tools] if self.agent else []}")
116
+
117
+ result = await self.agent.research_query(query, use_gemini=use_gemini)
118
+ logger.info(f"🔄 Agent research completed: success={result.get('success')}")
119
 
120
  if result.get("success"):
121
  response = result.get("result", "No analysis generated")
122
  sources = result.get("sources", [])
123
  metadata = result.get("metadata", {})
124
 
125
+ logger.info(f"📊 Response generated: {len(response)} chars, {len(sources)} sources")
126
+
127
+ # Check for chart data and generate visualizations
128
  visualizations = []
129
+ chart_data = await self._extract_chart_data_from_response(response)
130
+ if chart_data:
131
+ chart_html = await self._generate_chart_from_data(chart_data)
132
+ if chart_html:
133
+ visualizations.append(chart_html)
134
+ logger.info("✅ Chart generated from structured data")
135
+
136
+ # Clean the response for user display
137
+ cleaned_response = self._clean_agent_response(response)
138
+
139
+ # Generate visualizations if relevant data is available
140
  if metadata:
141
+ logger.info("📈 Checking for visualization data...")
142
  vis_html = await self._generate_visualizations(metadata, query)
143
  if vis_html:
144
  visualizations.append(vis_html)
145
+ logger.info("✅ Visualization generated")
146
 
147
  # Send to AIRAA if enabled
148
  if self.airaa and self.airaa.enabled:
149
  try:
150
  await self.airaa.send_research_data(query, response)
151
+ logger.info("📤 Data sent to AIRAA")
152
  except Exception as e:
153
+ logger.warning(f"⚠️ AIRAA integration failed: {e}")
154
 
155
  return QueryResponse(
156
  success=True,
157
+ response=cleaned_response,
158
  sources=sources,
159
  metadata=metadata,
160
  visualizations=visualizations
161
  )
162
  else:
163
  error_msg = result.get("error", "Research analysis failed")
164
+ logger.error(f"Research failed: {error_msg}")
165
  return QueryResponse(success=False, response=error_msg, error=error_msg)
166
 
167
  except Exception as e:
168
+ logger.error(f"💥 Query processing error: {e}", exc_info=True)
169
  error_msg = f"Processing error: {str(e)}"
170
  return QueryResponse(success=False, response=error_msg, error=error_msg)
171
 
 
202
  if symbol in query_upper:
203
  return symbol
204
  return 'BTC' # Default
205
+
206
+ async def _extract_chart_data_from_response(self, response: str) -> Optional[Dict[str, Any]]:
207
+ """Extract chart data JSON from agent response"""
208
+ try:
209
+ import re
210
+ import json
211
+
212
+ logger.info(f"🔍 Checking response for chart data (length: {len(response)} chars)")
213
+
214
+ # Look for JSON objects containing chart_type - find opening brace and matching closing brace
215
+ chart_data_found = None
216
+ lines = response.split('\n')
217
+
218
+ for i, line in enumerate(lines):
219
+ if '"chart_type"' in line and line.strip().startswith('{'):
220
+ # Found potential start of chart JSON
221
+ json_start = i
222
+ brace_count = 0
223
+ json_lines = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
224
 
225
+ for j in range(i, len(lines)):
226
+ current_line = lines[j]
227
+ json_lines.append(current_line)
228
+
229
+ # Count braces to find matching close
230
+ brace_count += current_line.count('{') - current_line.count('}')
231
+
232
+ if brace_count == 0:
233
+ # Found complete JSON object
234
+ json_text = '\n'.join(json_lines)
235
+ try:
236
+ chart_data = json.loads(json_text.strip())
237
+ if chart_data.get("chart_type") and chart_data.get("chart_type") != "error":
238
+ logger.info(f"✅ Found valid chart data: {chart_data.get('chart_type')}")
239
+ return chart_data
240
+ except json.JSONDecodeError:
241
+ # Try without newlines
242
+ try:
243
+ json_text_clean = json_text.replace('\n', '').replace(' ', ' ')
244
+ chart_data = json.loads(json_text_clean)
245
+ if chart_data.get("chart_type") and chart_data.get("chart_type") != "error":
246
+ logger.info(f"✅ Found valid chart data (cleaned): {chart_data.get('chart_type')}")
247
+ return chart_data
248
+ except json.JSONDecodeError:
249
+ continue
250
+ break
251
+
252
+ # Fallback to original regex approach for single-line JSON
253
+ json_pattern = r'\{[^{}]*"chart_type"[^{}]*\}|\{(?:[^{}]|\{[^{}]*\})*"chart_type"(?:[^{}]|\{[^{}]*\})*\}'
254
+ matches = re.findall(json_pattern, response, re.DOTALL)
255
+
256
+ logger.info(f" Found {len(matches)} potential chart data objects")
257
+
258
+ for match in matches:
259
+ try:
260
+ # Clean up the JSON
261
+ cleaned_match = match.replace('\\"', '"').replace('\\n', '\n')
262
+ chart_data = json.loads(cleaned_match)
263
 
264
+ if chart_data.get("chart_type") and chart_data.get("chart_type") != "error":
265
+ logger.info(f"✅ Valid chart data found: {chart_data.get('chart_type')}")
266
+ return chart_data
267
+
268
+ except json.JSONDecodeError:
269
+ continue
270
+
271
+ logger.info("⚠️ No valid chart data found in response")
272
+ return None
273
+
274
+ except Exception as e:
275
+ logger.error(f"Chart data extraction error: {e}")
276
+ return None
277
+
278
+ async def _generate_chart_from_data(self, chart_data: Dict[str, Any]) -> Optional[str]:
279
+ """Generate HTML visualization from chart data"""
280
+ try:
281
+ if not self.viz:
282
+ logger.warning("Visualization tools not available")
283
+ return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
284
 
285
+ chart_type = chart_data.get("chart_type")
286
+ data = chart_data.get("data", {})
287
+ config = chart_data.get("config", {})
288
+
289
+ logger.info(f"Generating {chart_type} chart with data keys: {list(data.keys())}")
290
+
291
+ if chart_type == "price_chart":
292
+ fig = self.viz.create_price_chart(data, data.get("symbol", "BTC"))
293
+ elif chart_type == "market_overview":
294
+ fig = self.viz.create_market_overview(data.get("coins", []))
295
+ elif chart_type == "defi_tvl":
296
+ fig = self.viz.create_defi_tvl_chart(data.get("protocols", []))
297
+ elif chart_type == "portfolio_pie":
298
+ # Convert allocation data to the expected format
299
+ allocations = {item["name"]: item["value"] for item in data.get("allocations", [])}
300
+ fig = self.viz.create_portfolio_pie_chart(allocations)
301
+ elif chart_type == "gas_tracker":
302
+ fig = self.viz.create_gas_tracker(data)
303
+ else:
304
+ logger.warning(f"Unknown chart type: {chart_type}")
305
+ return None
306
+
307
+ # Convert to HTML - use div_id and config for embedding
308
+ chart_id = f'chart_{chart_type}_{int(time.time())}'
309
+
310
+ # Generate HTML with inline Plotly for reliable rendering
311
+ html = fig.to_html(
312
+ include_plotlyjs='inline', # Embed Plotly directly - no CDN issues
313
+ div_id=chart_id,
314
+ config={'responsive': True, 'displayModeBar': False}
315
+ )
316
+
317
+ # With inline Plotly, we need to extract the body content only
318
+ import re
319
+ # Extract everything between <body> and </body>
320
+ body_match = re.search(r'<body[^>]*>(.*?)</body>', html, re.DOTALL)
321
+ if body_match:
322
+ chart_html = body_match.group(1).strip()
323
+ logger.info(f"✅ Chart HTML generated ({len(chart_html)} chars) - inline format")
324
+ return chart_html
325
+ else:
326
+ # Fallback - return the full HTML minus the html/head/body tags
327
+ # Remove full document structure, keep only the content
328
+ cleaned_html = re.sub(r'<html[^>]*>.*?<body[^>]*>', '', html, flags=re.DOTALL)
329
+ cleaned_html = re.sub(r'</body>.*?</html>', '', cleaned_html, flags=re.DOTALL)
330
+ logger.info(f"✅ Chart HTML generated ({len(cleaned_html)} chars) - cleaned format")
331
+ return cleaned_html.strip()
332
+
333
+ except Exception as e:
334
+ logger.error(f"Chart generation error: {e}")
335
+ return None
336
+ def _clean_agent_response(self, response: str) -> str:
337
+ """Clean agent response by removing JSON data blocks"""
338
+ try:
339
+ import re
340
+
341
+ # Method 1: Remove complete JSON objects with balanced braces that contain chart_type
342
+ lines = response.split('\n')
343
+ cleaned_lines = []
344
+ skip_mode = False
345
+ brace_count = 0
346
+
347
+ for line in lines:
348
+ if not skip_mode:
349
+ if '"chart_type"' in line and line.strip().startswith('{'):
350
+ # Found start of chart JSON - start skipping
351
+ skip_mode = True
352
+ brace_count = line.count('{') - line.count('}')
353
+ if brace_count == 0:
354
+ # Single line JSON, skip this line
355
+ skip_mode = False
356
+ continue
357
+ else:
358
+ cleaned_lines.append(line)
359
+ else:
360
+ # In skip mode - count braces to find end
361
+ brace_count += line.count('{') - line.count('}')
362
+ if brace_count <= 0:
363
+ # Found end of JSON block
364
+ skip_mode = False
365
+ # Skip this line in any case
366
+
367
+ cleaned = '\n'.join(cleaned_lines)
368
+
369
+ # Method 2: Fallback regex for any remaining JSON patterns
370
+ json_patterns = [
371
+ r'\{[^{}]*"chart_type"[^{}]*\}', # Simple single-line JSON
372
+ r'```json\s*\{.*?"chart_type".*?\}\s*```', # Markdown JSON blocks
373
+ ]
374
+
375
+ for pattern in json_patterns:
376
+ cleaned = re.sub(pattern, '', cleaned, flags=re.DOTALL)
377
+
378
+ # Clean up extra whitespace
379
+ cleaned = re.sub(r'\n\s*\n\s*\n+', '\n\n', cleaned)
380
+ cleaned = cleaned.strip()
381
+
382
+ return cleaned
383
+
384
+ except Exception as e:
385
+ logger.error(f"Response cleaning error: {e}")
386
+ return response
387
 
388
+ # Initialize service
389
+ service = Web3CoPilotService()
390
 
 
 
 
 
 
 
 
 
 
 
391
 
392
+ @app.get("/", response_class=HTMLResponse)
393
+ async def get_homepage(request: Request):
394
+ """Serve the main interface using templates"""
395
+ return templates.TemplateResponse("index.html", {"request": request})
396
  @app.get("/status")
397
  async def get_status():
398
  """System status endpoint"""
 
408
 
409
  @app.post("/query", response_model=QueryResponse)
410
  async def process_query(request: QueryRequest):
411
+ """Process research query with sanitized logging"""
412
+ # Log incoming request without exposing sensitive data
413
+ query_preview = request.query[:50] + "..." if len(request.query) > 50 else request.query
414
+ logger.info(f"Query received: {query_preview}")
415
+
416
+ start_time = datetime.now()
417
+
418
+ try:
419
+ # Process the query
420
+ result = await service.process_query(request.query)
421
+
422
+ # Log result without sensitive details
423
+ processing_time = (datetime.now() - start_time).total_seconds()
424
+ logger.info(f"Query processed in {processing_time:.2f}s - Success: {result.success}")
425
+
426
+ if result.success:
427
+ logger.info(f"Response generated: {len(result.response)} characters")
428
+ else:
429
+ logger.info("Query processing failed")
430
+
431
+ return result
432
+
433
+ except Exception as e:
434
+ processing_time = (datetime.now() - start_time).total_seconds()
435
+ logger.error(f"Query processing error after {processing_time:.2f}s")
436
+
437
+ return QueryResponse(
438
+ success=False,
439
+ response="We're experiencing technical difficulties. Please try again in a moment.",
440
+ error="System temporarily unavailable"
441
+ )
442
+
443
+ @app.post("/query/stream")
444
+ async def process_query_stream(request: QueryRequest):
445
+ """Process research query with real-time progress updates"""
446
+ query_preview = request.query[:50] + "..." if len(request.query) > 50 else request.query
447
+ logger.info(f"Streaming query received: {query_preview}")
448
+
449
+ async def generate_progress():
450
+ try:
451
+ # Send initial status
452
+ yield f"data: {json.dumps({'type': 'status', 'message': 'Initializing research...', 'progress': 10})}\n\n"
453
+ await asyncio.sleep(0.1)
454
+
455
+ # Send tool selection status
456
+ yield f"data: {json.dumps({'type': 'status', 'message': 'Analyzing query and selecting tools...', 'progress': 20})}\n\n"
457
+ await asyncio.sleep(0.5)
458
+
459
+ # Send tools status
460
+ if service.agent and service.agent.enabled:
461
+ tools = [tool.name for tool in service.agent.tools]
462
+ yield f"data: {json.dumps({'type': 'tools', 'message': f'Available tools: {tools}', 'progress': 30})}\n\n"
463
+ await asyncio.sleep(0.5)
464
+
465
+ # Send processing status
466
+ yield f"data: {json.dumps({'type': 'status', 'message': 'Executing tools and gathering data...', 'progress': 50})}\n\n"
467
+ await asyncio.sleep(0.5)
468
+
469
+ # Send Ollama/Gemini processing status with heartbeats
470
+ llm_name = "Gemini" if request.use_gemini else "Ollama"
471
+ yield f"data: {json.dumps({'type': 'status', 'message': f'{llm_name} is analyzing data and generating response...', 'progress': 70})}\n\n"
472
+ await asyncio.sleep(1.0)
473
+
474
+ # Send additional heartbeat messages during processing
475
+ yield f"data: {json.dumps({'type': 'status', 'message': f'{llm_name} is thinking deeply about your query...', 'progress': 75})}\n\n"
476
+ await asyncio.sleep(2.0)
477
+
478
+ yield f"data: {json.dumps({'type': 'status', 'message': f'Still processing... {llm_name} generates detailed responses', 'progress': 80})}\n\n"
479
+ await asyncio.sleep(3.0)
480
+
481
+ # Process the actual query with timeout and periodic heartbeats
482
+ start_time = datetime.now()
483
+
484
+ # Create a task for the query processing
485
+ query_task = asyncio.create_task(service.process_query(request.query, request.use_gemini))
486
+
487
+ try:
488
+ # Send periodic heartbeats while waiting for Ollama
489
+ heartbeat_count = 0
490
+ while not query_task.done():
491
+ try:
492
+ # Wait for either completion or timeout
493
+ result = await asyncio.wait_for(asyncio.shield(query_task), timeout=10.0)
494
+ break # Query completed
495
+ except asyncio.TimeoutError:
496
+ # Send heartbeat every 10 seconds
497
+ heartbeat_count += 1
498
+ elapsed = (datetime.now() - start_time).total_seconds()
499
+
500
+ if elapsed > 300: # 5 minute hard timeout
501
+ query_task.cancel()
502
+ raise asyncio.TimeoutError("Hard timeout reached")
503
+
504
+ progress = min(85 + (heartbeat_count * 2), 95) # Progress slowly from 85 to 95
505
+ llm_name = "Gemini" if request.use_gemini else "Ollama"
506
+ yield f"data: {json.dumps({'type': 'status', 'message': f'{llm_name} is still working... ({elapsed:.0f}s elapsed)', 'progress': progress})}\n\n"
507
+
508
+ # If we get here, the query completed successfully
509
+ result = query_task.result()
510
+ processing_time = (datetime.now() - start_time).total_seconds()
511
+
512
+ # Send completion status
513
+ yield f"data: {json.dumps({'type': 'status', 'message': f'Analysis complete ({processing_time:.1f}s)', 'progress': 90})}\n\n"
514
+ await asyncio.sleep(0.5)
515
+
516
+ # Send final result
517
+ yield f"data: {json.dumps({'type': 'result', 'data': result.model_dump(), 'progress': 100})}\n\n"
518
+
519
+ except asyncio.TimeoutError:
520
+ processing_time = (datetime.now() - start_time).total_seconds()
521
+ logger.error(f"Query processing timed out after {processing_time:.1f}s")
522
+
523
+ # Send timeout result with available data
524
+ yield f"data: {json.dumps({'type': 'result', 'data': {'success': False, 'response': 'Analysis timed out, but tools successfully gathered data. The system collected cryptocurrency prices, DeFi protocol information, and blockchain data. Please try a simpler query or try again.', 'sources': [], 'metadata': {'timeout': True, 'processing_time': processing_time}, 'visualizations': [], 'error': 'Processing timeout'}, 'progress': 100})}\n\n"
525
+
526
+ except Exception as query_error:
527
+ processing_time = (datetime.now() - start_time).total_seconds()
528
+ logger.error(f"Query processing failed: {query_error}")
529
+
530
+ # Send error result
531
+ yield f"data: {json.dumps({'type': 'result', 'data': {'success': False, 'response': f'Analysis failed: {str(query_error)}. The system was able to gather some data but encountered an error during final processing.', 'sources': [], 'metadata': {'error': True, 'processing_time': processing_time}, 'visualizations': [], 'error': str(query_error)}, 'progress': 100})}\n\n"
532
+
533
+ # Send completion signal
534
+ yield f"data: {json.dumps({'type': 'complete'})}\n\n"
535
+
536
+ except Exception as e:
537
+ logger.error(f"Streaming error: {e}")
538
+ yield f"data: {json.dumps({'type': 'error', 'message': str(e)})}\n\n"
539
+
540
+ return StreamingResponse(
541
+ generate_progress(),
542
+ media_type="text/event-stream",
543
+ headers={
544
+ "Cache-Control": "no-cache",
545
+ "Connection": "keep-alive",
546
+ "Content-Type": "text/event-stream",
547
+ "X-Accel-Buffering": "no", # Disable buffering for nginx
548
+ "Access-Control-Allow-Origin": "*",
549
+ "Access-Control-Allow-Headers": "Content-Type",
550
+ }
551
+ )
552
 
553
  @app.get("/health")
554
  async def health_check():
 
560
  "version": "2.0.0"
561
  }
562
 
563
+ @app.get("/debug/tools")
564
+ async def debug_tools():
565
+ """Debug endpoint to test tool availability and functionality"""
566
+ try:
567
+ if not service.enabled or not service.agent:
568
+ return {
569
+ "success": False,
570
+ "error": "AI agent not enabled",
571
+ "tools_available": False,
572
+ "gemini_configured": bool(config.GEMINI_API_KEY)
573
+ }
574
+
575
+ tools_info = []
576
+ for tool in service.agent.tools:
577
+ tools_info.append({
578
+ "name": tool.name,
579
+ "description": getattr(tool, 'description', 'No description'),
580
+ "enabled": getattr(tool, 'enabled', True)
581
+ })
582
+
583
+ # Test a simple API call
584
+ test_result = None
585
+ try:
586
+ test_result = await service.process_query("What is the current Bitcoin price?")
587
+ except Exception as e:
588
+ test_result = {"error": str(e)}
589
+
590
+ return {
591
+ "success": True,
592
+ "tools_count": len(service.agent.tools),
593
+ "tools_info": tools_info,
594
+ "test_query_result": {
595
+ "success": test_result.success if hasattr(test_result, 'success') else False,
596
+ "response_length": len(test_result.response) if hasattr(test_result, 'response') else 0,
597
+ "sources": test_result.sources if hasattr(test_result, 'sources') else [],
598
+ "error": test_result.error if hasattr(test_result, 'error') else None
599
+ },
600
+ "gemini_configured": bool(config.GEMINI_API_KEY),
601
+ "timestamp": datetime.now().isoformat()
602
+ }
603
+ except Exception as e:
604
+ logger.error(f"Debug tools error: {e}")
605
+ return {
606
+ "success": False,
607
+ "error": str(e),
608
+ "timestamp": datetime.now().isoformat()
609
+ }
610
+
611
  if __name__ == "__main__":
612
  import uvicorn
613
  logger.info("Starting Web3 Research Co-Pilot...")
app_config.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ title: Web3 Research Co-Pilot
2
+ emoji: 🚀
3
+ colorFrom: blue
4
+ colorTo: green
5
+ sdk: docker
6
+ app_file: app.py
7
+ dockerfile: Dockerfile
8
+ license: mit
9
+ tags:
10
+ - cryptocurrency
11
+ - blockchain
12
+ - defi
13
+ - ai-research
14
+ - ollama
15
+ - llama3
16
+ pinned: false
17
+ header: default
18
+ short_description: AI-powered cryptocurrency research assistant with real-time data
19
+ suggested_hardware: t4-medium
debug_gemini.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Debug test to understand why Gemini responses aren't being cleaned
4
+ """
5
+
6
+ import asyncio
7
+ import sys
8
+ import os
9
+
10
+ # Add src to path
11
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
12
+
13
+ from langchain_google_genai import ChatGoogleGenerativeAI
14
+ from src.utils.config import config
15
+
16
+ async def test_gemini_response_structure():
17
+ """Test the structure of Gemini responses to understand the cleaning issue"""
18
+
19
+ if not config.GEMINI_API_KEY:
20
+ print("❌ No Gemini API key available")
21
+ return False
22
+
23
+ try:
24
+ print("🧪 Testing Gemini response structure...")
25
+
26
+ # Initialize Gemini
27
+ llm = ChatGoogleGenerativeAI(
28
+ model="gemini-2.0-flash-lite",
29
+ google_api_key=config.GEMINI_API_KEY,
30
+ temperature=0.1
31
+ )
32
+
33
+ # Test simple query
34
+ response = await llm.ainvoke("What is 2+2?")
35
+
36
+ print(f"📄 Response type: {type(response)}")
37
+ print(f"📄 Response dir: {[attr for attr in dir(response) if not attr.startswith('_')]}")
38
+
39
+ if hasattr(response, 'content'):
40
+ print(f"✅ Response has 'content' attribute")
41
+ print(f"📝 Content: {response.content}")
42
+ print(f"📝 Content type: {type(response.content)}")
43
+ else:
44
+ print("❌ Response does NOT have 'content' attribute")
45
+
46
+ print(f"📄 Full response: {str(response)}")
47
+
48
+ return True
49
+
50
+ except Exception as e:
51
+ print(f"❌ Test failed: {e}")
52
+ return False
53
+
54
+ async def main():
55
+ success = await test_gemini_response_structure()
56
+ if success:
57
+ print("\n🎉 Test completed!")
58
+ return 0
59
+ else:
60
+ print("\n❌ Test failed!")
61
+ return 1
62
+
63
+ if __name__ == "__main__":
64
+ exit_code = asyncio.run(main())
65
+ sys.exit(exit_code)
dev_check.sh ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ # Development syntax checker script
3
+
4
+ echo "🔍 Running development syntax check..."
5
+
6
+ # Check Python syntax using built-in compile
7
+ echo "1️⃣ Python syntax validation..."
8
+ find . -name "*.py" -not -path "./__pycache__/*" -not -path "./.*" | while read file; do
9
+ python -m py_compile "$file" 2>/dev/null
10
+ if [ $? -eq 0 ]; then
11
+ echo " ✅ $file"
12
+ else
13
+ echo " ❌ $file - SYNTAX ERROR"
14
+ python -m py_compile "$file"
15
+ exit 1
16
+ fi
17
+ done
18
+
19
+ echo ""
20
+ echo "2️⃣ Running comprehensive validation..."
21
+ python validate_startup.py
22
+
23
+ echo ""
24
+ echo "3️⃣ Quick import test..."
25
+ python -c "
26
+ try:
27
+ import app
28
+ print(' ✅ app.py imports successfully')
29
+ except Exception as e:
30
+ print(f' ❌ app.py import failed: {e}')
31
+ exit(1)
32
+
33
+ try:
34
+ from src.agent.research_agent import Web3ResearchAgent
35
+ print(' ✅ research_agent.py imports successfully')
36
+ except Exception as e:
37
+ print(f' ❌ research_agent.py import failed: {e}')
38
+ exit(1)
39
+ "
40
+
41
+ if [ $? -eq 0 ]; then
42
+ echo ""
43
+ echo "🎉 All syntax checks passed! Ready for deployment."
44
+ else
45
+ echo ""
46
+ echo "❌ Syntax check failed. Please fix errors before deploying."
47
+ exit 1
48
+ fi
src/agent/research_agent.py CHANGED
@@ -1,6 +1,5 @@
1
- from langchain.agents import AgentExecutor, create_tool_calling_agent
2
  from langchain_google_genai import ChatGoogleGenerativeAI
3
- from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
4
  from langchain.memory import ConversationBufferWindowMemory
5
  from typing import List, Dict, Any
6
  import asyncio
@@ -8,65 +7,110 @@ from datetime import datetime
8
 
9
  from src.tools.coingecko_tool import CoinGeckoTool
10
  from src.tools.defillama_tool import DeFiLlamaTool
 
11
  from src.tools.etherscan_tool import EtherscanTool
12
- from src.agent.query_planner import QueryPlanner
 
13
  from src.utils.config import config
14
  from src.utils.logger import get_logger
 
15
 
16
  logger = get_logger(__name__)
17
 
 
 
 
 
 
 
 
18
  class Web3ResearchAgent:
19
  def __init__(self):
20
  self.llm = None
 
21
  self.tools = []
22
- self.agent = None
23
- self.executor = None
24
  self.enabled = False
25
-
26
- if not config.GEMINI_API_KEY:
27
- logger.warning("GEMINI_API_KEY not configured - AI agent disabled")
28
- return
29
 
30
  try:
31
- self.llm = ChatGoogleGenerativeAI(
32
- model="gemini-1.5-flash",
33
- google_api_key=config.GEMINI_API_KEY,
34
- temperature=0.1,
35
- max_tokens=2048
36
- )
37
 
 
 
 
 
 
38
  self.tools = self._initialize_tools()
39
- self.query_planner = QueryPlanner(self.llm)
40
- self.memory = ConversationBufferWindowMemory(
41
- memory_key="chat_history", return_messages=True, k=10
42
- )
43
-
44
- self.agent = self._create_agent()
45
- self.executor = AgentExecutor(
46
- agent=self.agent, tools=self.tools, memory=self.memory,
47
- verbose=False, max_iterations=5, handle_parsing_errors=True
48
- )
49
  self.enabled = True
50
- logger.info("Web3ResearchAgent initialized successfully")
51
-
52
  except Exception as e:
53
- logger.error(f"Agent init failed: {e}")
54
  self.enabled = False
 
 
 
 
 
 
 
 
 
 
 
 
 
55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  def _initialize_tools(self):
57
  tools = []
58
 
59
- try:
60
- tools.append(CoinGeckoTool())
61
- logger.info("CoinGecko tool initialized")
62
- except Exception as e:
63
- logger.warning(f"CoinGecko tool failed: {e}")
 
 
 
 
64
 
65
  try:
66
  tools.append(DeFiLlamaTool())
67
  logger.info("DeFiLlama tool initialized")
68
  except Exception as e:
69
  logger.warning(f"DeFiLlama tool failed: {e}")
 
 
 
 
 
 
70
 
71
  try:
72
  tools.append(EtherscanTool())
@@ -74,110 +118,577 @@ class Web3ResearchAgent:
74
  except Exception as e:
75
  logger.warning(f"Etherscan tool failed: {e}")
76
 
 
 
 
 
 
 
77
  return tools
78
-
79
- def _create_agent(self):
80
- prompt = ChatPromptTemplate.from_messages([
81
- ("system", """You are an expert Web3 research assistant. Use available tools to provide accurate,
82
- data-driven insights about cryptocurrency markets, DeFi protocols, and blockchain data.
83
-
84
- Format responses with clear sections, emojis, and actionable insights."""),
85
- MessagesPlaceholder("chat_history"),
86
- ("human", "{input}"),
87
- MessagesPlaceholder("agent_scratchpad")
88
- ])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
 
90
- return create_tool_calling_agent(self.llm, self.tools, prompt)
91
-
92
- async def research_query(self, query: str) -> Dict[str, Any]:
93
  if not self.enabled:
94
  return {
95
  "success": False,
96
  "query": query,
97
- "error": "AI agent not configured. Please set GEMINI_API_KEY environment variable.",
98
- "result": " **Service Unavailable**\n\nThe AI research agent requires a GEMINI_API_KEY to function.\n\nPlease:\n1. Get a free API key from [Google AI Studio](https://makersuite.google.com/app/apikey)\n2. Set environment variable: `export GEMINI_API_KEY='your_key'`\n3. Restart the application",
99
  "sources": [],
100
  "metadata": {"timestamp": datetime.now().isoformat()}
101
  }
102
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  try:
104
- logger.info(f"Processing: {query}")
 
 
 
 
105
 
106
- research_plan = await self.query_planner.plan_research(query)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
 
108
- enhanced_query = f"""
109
- Research Query: {query}
110
- Research Plan: {research_plan.get('steps', [])}
111
- Priority: {research_plan.get('priority', 'general')}
 
 
 
 
112
 
113
- Execute systematic research and provide comprehensive analysis.
114
- """
115
 
116
- result = await asyncio.to_thread(
117
- self.executor.invoke, {"input": enhanced_query}
118
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
 
 
120
  return {
121
  "success": True,
122
  "query": query,
123
- "research_plan": research_plan,
124
- "result": result.get("output", "No response"),
125
- "sources": self._extract_sources(result.get("output", "")),
126
  "metadata": {
127
- "tools_used": [tool.name for tool in self.tools],
 
128
  "timestamp": datetime.now().isoformat()
129
  }
130
  }
131
 
132
  except Exception as e:
133
- logger.error(f"Research error: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
  return {
135
- "success": False,
136
  "query": query,
137
- "error": str(e),
138
- "result": f"❌ **Research Error**: {str(e)}\n\nPlease try a different query or check your API configuration.",
139
  "sources": [],
140
- "metadata": {"timestamp": datetime.now().isoformat()}
 
 
 
 
141
  }
142
-
143
- async def get_price_history(self, symbol: str, days: int = 30) -> Dict[str, Any]:
144
- try:
145
- coingecko_tool = next(t for t in self.tools if isinstance(t, CoinGeckoTool))
146
- return await coingecko_tool._arun(symbol, {"type": "price_history", "days": days})
147
- except Exception as e:
148
- logger.error(f"Price history error: {e}")
149
- return {}
150
-
151
- async def get_comprehensive_market_data(self) -> Dict[str, Any]:
152
- try:
153
- tasks = []
154
- for tool in self.tools:
155
- if isinstance(tool, CoinGeckoTool):
156
- tasks.append(tool._arun("", {"type": "market_overview"}))
157
- elif isinstance(tool, DeFiLlamaTool):
158
- tasks.append(tool._arun("", {"type": "tvl_overview"}))
159
-
160
- results = await asyncio.gather(*tasks, return_exceptions=True)
161
-
162
- data = {}
163
- for i, result in enumerate(results):
164
- if not isinstance(result, Exception):
165
- if i == 0:
166
- data["market"] = result
167
- elif i == 1:
168
- data["defi"] = result
169
-
170
- return data
171
  except Exception as e:
172
- logger.error(f"Market data error: {e}")
173
- return {}
174
-
175
- def _extract_sources(self, result_text: str) -> List[str]:
 
 
 
 
176
  sources = []
177
- if "CoinGecko" in result_text or "coingecko" in result_text.lower():
178
- sources.append("CoinGecko API")
179
- if "DeFiLlama" in result_text or "defillama" in result_text.lower():
180
- sources.append("DeFiLlama API")
181
- if "Etherscan" in result_text or "etherscan" in result_text.lower():
182
- sources.append("Etherscan API")
 
 
183
  return sources
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from langchain_google_genai import ChatGoogleGenerativeAI
2
+ from langchain_community.llms import Ollama
3
  from langchain.memory import ConversationBufferWindowMemory
4
  from typing import List, Dict, Any
5
  import asyncio
 
7
 
8
  from src.tools.coingecko_tool import CoinGeckoTool
9
  from src.tools.defillama_tool import DeFiLlamaTool
10
+ from src.tools.cryptocompare_tool import CryptoCompareTool
11
  from src.tools.etherscan_tool import EtherscanTool
12
+ from src.tools.chart_data_tool import ChartDataTool
13
+ from src.agent.memory_manager import MemoryManager
14
  from src.utils.config import config
15
  from src.utils.logger import get_logger
16
+ from src.utils.ai_safety import ai_safety
17
 
18
  logger = get_logger(__name__)
19
 
20
# Add version logging for debugging.
# version.py is optional (may be absent in some checkouts/deployments), so a
# missing module is tolerated and reported as "Unknown" rather than raising.
try:
    from version import VERSION
    logger.info(f"🔧 Research Agent Version: {VERSION}")
except ImportError:
    logger.info("🔧 Research Agent Version: Unknown")
26
+
27
  class Web3ResearchAgent:
28
    def __init__(self):
        """Set up LLM clients, research tools, and conversation memory.

        Ollama is always initialized first as the fallback LLM; Gemini is
        attempted only when `config.GEMINI_API_KEY` is set. Any failure during
        setup is logged and leaves the agent disabled (`self.enabled = False`)
        instead of raising to the caller.
        """
        self.llm = None                # primary Gemini client (set by _init_gemini)
        self.fallback_llm = None       # local Ollama client (set by _init_ollama)
        self.tools = []
        self.enabled = False
        self.gemini_available = False
        self.memory_manager = MemoryManager(window_size=10)

        try:
            # Always initialize Ollama — _init_ollama re-raises on failure,
            # which lands in the except below and disables the agent.
            logger.info("🔧 Initializing Ollama as fallback")
            self._init_ollama()

            # Try to initialize Gemini if API key is available (non-fatal).
            if config.GEMINI_API_KEY:
                logger.info("🔧 Initializing Gemini as primary option")
                self._init_gemini()

            self.tools = self._initialize_tools()
            self.enabled = True
            logger.info("🧠 Memory Manager initialized with conversation tracking")

        except Exception as e:
            logger.error(f"Agent initialization failed: {e}")
            self.enabled = False
53
+
54
+ def _init_ollama(self):
55
+ """Initialize Ollama LLM with optimized settings"""
56
+ try:
57
+ self.fallback_llm = Ollama(
58
+ model=config.OLLAMA_MODEL,
59
+ base_url=config.OLLAMA_BASE_URL,
60
+ temperature=0.1
61
+ )
62
+ logger.info(f"✅ Ollama initialized - Model: {config.OLLAMA_MODEL} (timeout optimized)")
63
+ except Exception as e:
64
+ logger.error(f"Ollama initialization failed: {e}")
65
+ raise
66
 
67
+ def _init_gemini(self):
68
+ """Initialize Gemini LLM"""
69
+ try:
70
+ self.llm = ChatGoogleGenerativeAI(
71
+ model="gemini-2.0-flash-lite", # Updated to Gemini 2.0 Flash-Lite
72
+ google_api_key=config.GEMINI_API_KEY,
73
+ temperature=0.1
74
+ )
75
+ self.gemini_available = True
76
+ logger.info("✅ Gemini initialized with gemini-2.0-flash-lite")
77
+ except Exception as e:
78
+ logger.warning(f"Gemini initialization failed: {e}")
79
+ self.gemini_available = False
80
+
81
    def _init_ollama_only(self):
        """Initialize with only Ollama LLM (deprecated - kept for compatibility).

        Thin shim retained so older callers keep working; new code should rely
        on the standard `__init__` initialization path.
        """
        self._init_ollama()
84
+
85
    def _init_with_gemini_fallback(self):
        """Initialize with Gemini primary and Ollama fallback (deprecated - kept for compatibility).

        Mirrors what `__init__` already does (Ollama first, then Gemini);
        retained only for backward compatibility with older callers.
        """
        self._init_ollama()
        self._init_gemini()
89
+
90
  def _initialize_tools(self):
91
  tools = []
92
 
93
+ # Skip CoinGecko if no API key available
94
+ if config.COINGECKO_API_KEY:
95
+ try:
96
+ tools.append(CoinGeckoTool())
97
+ logger.info("CoinGecko tool initialized")
98
+ except Exception as e:
99
+ logger.warning(f"CoinGecko tool failed: {e}")
100
+ else:
101
+ logger.info("CoinGecko tool skipped - no API key available")
102
 
103
  try:
104
  tools.append(DeFiLlamaTool())
105
  logger.info("DeFiLlama tool initialized")
106
  except Exception as e:
107
  logger.warning(f"DeFiLlama tool failed: {e}")
108
+
109
+ try:
110
+ tools.append(CryptoCompareTool())
111
+ logger.info("CryptoCompare tool initialized")
112
+ except Exception as e:
113
+ logger.warning(f"CryptoCompare tool failed: {e}")
114
 
115
  try:
116
  tools.append(EtherscanTool())
 
118
  except Exception as e:
119
  logger.warning(f"Etherscan tool failed: {e}")
120
 
121
+ try:
122
+ tools.append(ChartDataTool())
123
+ logger.info("ChartDataTool initialized")
124
+ except Exception as e:
125
+ logger.warning(f"ChartDataTool failed: {e}")
126
+
127
  return tools
128
+
129
    async def research_query(self, query: str, use_gemini: bool = False) -> Dict[str, Any]:
        """Research query with dynamic LLM selection - Enhanced with AI Safety.

        Pipeline: sanitize the query, enforce rate limits, pick Gemini or
        Ollama, run the tool-augmented research, and persist successful
        interactions to conversation memory.

        Args:
            query: Raw user query (the sanitized form is what gets researched).
            use_gemini: Prefer Gemini when it was successfully initialized;
                otherwise Ollama is used regardless.

        Returns:
            A dict with keys ``success``, ``query``, ``result``, ``sources``,
            ``metadata`` (plus ``error`` on failure paths).
        """
        # AI Safety Check 1: Sanitize and validate input.
        sanitized_query, is_safe, safety_reason = ai_safety.sanitize_query(query)
        if not is_safe:
            ai_safety.log_safety_event("blocked_query", {
                "original_query": query[:100],  # truncate to avoid logging huge payloads
                "reason": safety_reason,
                "timestamp": datetime.now().isoformat()
            })
            return {
                "success": False,
                "query": query,
                "error": f"Safety filter: {safety_reason}",
                "result": "Your query was blocked by our safety filters. Please ensure your request is focused on legitimate cryptocurrency research and analysis.",
                "sources": [],
                "metadata": {"timestamp": datetime.now().isoformat(), "safety_blocked": True}
            }

        # AI Safety Check 2: Rate limiting.
        rate_ok, rate_message = ai_safety.check_rate_limit()
        if not rate_ok:
            ai_safety.log_safety_event("rate_limit", {
                "message": rate_message,
                "timestamp": datetime.now().isoformat()
            })
            return {
                "success": False,
                "query": query,
                "error": "Rate limit exceeded",
                "result": f"Please wait before making another request. {rate_message}",
                "sources": [],
                "metadata": {"timestamp": datetime.now().isoformat(), "rate_limited": True}
            }

        # Agent failed to initialize (see __init__) — bail out early.
        if not self.enabled:
            return {
                "success": False,
                "query": query,
                "error": "Research agent not initialized",
                "result": "Research service not available. Please check configuration.",
                "sources": [],
                "metadata": {"timestamp": datetime.now().isoformat()}
            }

        # Get conversation context from memory (keyed on the sanitized query).
        memory_context = self.memory_manager.get_relevant_context(sanitized_query)
        logger.info(f"🧠 Retrieved memory context: {len(memory_context.get('cached_context', []))} relevant items")

        try:
            # Choose LLM based on user preference and availability.
            if use_gemini and self.gemini_available:
                logger.info("🤖 Processing with Gemini + Tools (Safety Enhanced + Memory)")
                result = await self._research_with_gemini_tools(sanitized_query, memory_context)
            else:
                logger.info("🤖 Processing with Ollama + Tools (Safety Enhanced + Memory)")
                result = await self._research_with_ollama_tools(sanitized_query, memory_context)

            # Save successful interaction to memory (original query, not the
            # sanitized form, so follow-ups match what the user actually asked).
            if result.get("success"):
                metadata = {
                    "llm_used": result.get("metadata", {}).get("llm_used", "unknown"),
                    "tools_used": result.get("metadata", {}).get("tools_used", []),
                    "timestamp": datetime.now().isoformat(),
                    "sources": result.get("sources", [])
                }
                self.memory_manager.add_interaction(query, result["result"], metadata)
                logger.info("🧠 Interaction saved to memory")

            return result

        except Exception as e:
            logger.error(f"Research failed: {e}")
            # Fallback: a plain (tool-less) Ollama completion, still safety-checked.
            try:
                safe_prompt = ai_safety.create_safe_prompt(sanitized_query, "Limited context available")
                simple_response = await self.fallback_llm.ainvoke(safe_prompt)

                # Validate response safety before returning it to the user.
                clean_response, response_safe, response_reason = ai_safety.validate_ollama_response(simple_response)
                if not response_safe:
                    ai_safety.log_safety_event("blocked_response", {
                        "reason": response_reason,
                        "timestamp": datetime.now().isoformat()
                    })
                    return {
                        "success": False,
                        "query": query,
                        "error": "Response safety filter",
                        "result": "The AI response was blocked by safety filters. Please try a different query.",
                        "sources": [],
                        "metadata": {"timestamp": datetime.now().isoformat(), "response_blocked": True}
                    }

                return {
                    "success": True,
                    "query": query,
                    "result": clean_response,
                    "sources": [],
                    "metadata": {"llm": "ollama", "mode": "simple", "timestamp": datetime.now().isoformat()}
                }
            except Exception as fallback_error:
                # Both the primary path and the fallback failed — report the error.
                logger.error(f"Fallback response failed: {fallback_error}")
                return {
                    "success": False,
                    "query": query,
                    "error": str(fallback_error),
                    "result": f"Research failed: {str(fallback_error)}",
                    "sources": [],
                    "metadata": {"timestamp": datetime.now().isoformat()}
                }
+ }
241
+
242
    async def _research_with_ollama_tools(self, query: str, memory_context: Dict[str, Any] = None) -> Dict[str, Any]:
        """Research using Ollama with manual tool calling - Enhanced with memory.

        Three steps: (1) ask Ollama which tools fit the query, (2) execute
        those tools, (3) ask Ollama to synthesize a final answer from the tool
        output. Every LLM call is wrapped in a timeout with a keyword-based
        fallback so a slow local model cannot stall the web request.

        Raises: re-raises any unexpected error so research_query's fallback
        path can handle it.
        """
        try:
            # Step 1: Analyze query to determine which tools to use.
            # Include memory context in analysis if available.
            context_note = ""
            if memory_context and memory_context.get("cached_context"):
                context_note = f"\n\nPrevious context: {len(memory_context['cached_context'])} related queries found"

            tool_analysis_prompt = f"""Tools for: "{query}"{context_note}

cryptocompare_data: crypto prices
etherscan_data: Ethereum data
defillama_data: DeFi TVL
chart_data_provider: charts

Bitcoin price → cryptocompare_data
DeFi TVL → defillama_data
Ethereum → etherscan_data

Answer with tool names:"""
            try:
                tool_response = await asyncio.wait_for(
                    self.fallback_llm.ainvoke(tool_analysis_prompt),
                    timeout=30  # 30 second timeout for tool analysis
                )
                logger.info(f"🧠 Ollama tool analysis response: {str(tool_response)[:500]}...")

                # Clean up the response and extract tool names by substring
                # match — tolerant of whatever format the model answered in.
                response_text = str(tool_response).lower()
                suggested_tools = []

                # Check for each tool in the response.
                tool_mappings = {
                    'cryptocompare': 'cryptocompare_data',
                    'defillama': 'defillama_data',
                    'etherscan': 'etherscan_data',
                    'chart': 'chart_data_provider'
                }

                for keyword, tool_name in tool_mappings.items():
                    if keyword in response_text:
                        suggested_tools.append(tool_name)

            except asyncio.TimeoutError:
                logger.warning("⏱️ Tool analysis timed out, using fallback tool selection")
                # Fallback tool selection based on query keywords.
                suggested_tools = []
                query_lower = query.lower()

                if any(word in query_lower for word in ['price', 'bitcoin', 'btc', 'ethereum', 'eth', 'crypto']):
                    suggested_tools.append('cryptocompare_data')
                if 'defi' in query_lower or 'tvl' in query_lower:
                    suggested_tools.append('defillama_data')
                if 'ethereum' in query_lower or 'gas' in query_lower:
                    suggested_tools.append('etherscan_data')
                if any(word in query_lower for word in ['chart', 'graph', 'visualization', 'trend']):
                    suggested_tools.append('chart_data_provider')

                # Default to basic crypto data if no matches.
                if not suggested_tools:
                    suggested_tools = ['cryptocompare_data']

            # Default to at least one relevant tool if parsing (the non-timeout
            # path above) found nothing in the model's answer.
            if not suggested_tools:
                if any(word in query.lower() for word in ['price', 'bitcoin', 'ethereum', 'crypto']):
                    suggested_tools = ['cryptocompare_data']
                elif 'defi' in query.lower() or 'tvl' in query.lower():
                    suggested_tools = ['defillama_data']
                else:
                    suggested_tools = ['cryptocompare_data']

            logger.info(f"🛠️ Ollama suggested tools: {suggested_tools}")

            # Step 2: Execute relevant tools. Tool failures are captured as
            # text so the synthesis step still sees partial results.
            tool_results = []
            try:
                for tool_name in suggested_tools:
                    tool = next((t for t in self.tools if t.name == tool_name), None)
                    if tool:
                        try:
                            logger.info(f"🔧 Executing {tool_name}")

                            # Handle chart_data_provider with proper parameters.
                            if tool_name == "chart_data_provider":
                                # Extract chart type from query or default to price_chart.
                                chart_type = "price_chart"  # Default
                                symbol = "bitcoin"  # Default

                                if "defi" in query.lower() or "tvl" in query.lower():
                                    chart_type = "defi_tvl"
                                elif "market" in query.lower() or "overview" in query.lower():
                                    chart_type = "market_overview"
                                elif "gas" in query.lower():
                                    chart_type = "gas_tracker"

                                # Extract symbol if mentioned.
                                if "ethereum" in query.lower() or "eth" in query.lower():
                                    symbol = "ethereum"
                                elif "bitcoin" in query.lower() or "btc" in query.lower():
                                    symbol = "bitcoin"

                                result = await tool._arun(chart_type=chart_type, symbol=symbol)
                            else:
                                # Other tools use the query directly.
                                result = await tool._arun(query)

                            logger.info(f"📊 {tool_name} result preview: {str(result)[:200]}...")
                            tool_results.append(f"=== {tool_name} Results ===\n{result}\n")
                        except Exception as e:
                            logger.error(f"Tool {tool_name} failed: {e}")
                            tool_results.append(f"=== {tool_name} Error ===\nTool failed: {str(e)}\n")
                        finally:
                            # Cleanup tool session if available.
                            if hasattr(tool, 'cleanup'):
                                try:
                                    await tool.cleanup()
                                except Exception:
                                    pass  # Ignore cleanup errors
            finally:
                # Ensure all tools are cleaned up, even ones not executed above,
                # so no aiohttp sessions are leaked on any exit path.
                for tool in self.tools:
                    if hasattr(tool, 'cleanup'):
                        try:
                            await tool.cleanup()
                        except Exception:
                            pass

            # Step 3: Generate final response with tool results using AI Safety.
            context = "\n".join(tool_results) if tool_results else "No tool data available - provide general information."

            # Use AI Safety to create a safe prompt.
            final_prompt = ai_safety.create_safe_prompt(query, context)

            # Add timeout for final response to prevent web request timeout.
            try:
                final_response = await asyncio.wait_for(
                    self.fallback_llm.ainvoke(final_prompt),
                    timeout=440  # NOTE(review): actual budget is 440s — the old comment said "90 second" and the timeout log below says "(60s)"; confirm the intended value
                )
                logger.info(f"🎯 Ollama final response preview: {str(final_response)[:300]}...")

                # Extract content from Ollama response (plain text).
                response_content = str(final_response)

                # AI Safety Check: Validate response.
                clean_response, response_safe, response_reason = ai_safety.validate_ollama_response(response_content)
                if not response_safe:
                    ai_safety.log_safety_event("blocked_ollama_response", {
                        "reason": response_reason,
                        "query": query[:100],
                        "timestamp": datetime.now().isoformat()
                    })
                    # Use tool data directly instead of unsafe response.
                    clean_response = f"""## Cryptocurrency Analysis

Based on the available data:

{context[:1000]}

*Response generated from verified tool data for safety compliance.*"""

                final_response = clean_response

            except asyncio.TimeoutError:
                logger.warning("⏱️ Ollama final response timed out (60s), using enhanced tool summary")
                # Create a better summary from the tool results.
                summary_parts = []

                if "cryptocompare_data" in suggested_tools:
                    summary_parts.append("📊 **Price Data**: Live cryptocurrency prices retrieved")
                if "defillama_data" in suggested_tools:
                    summary_parts.append("🔒 **DeFi Data**: Protocol TVL and yield information available")
                if "etherscan_data" in suggested_tools:
                    summary_parts.append("⛓️ **Blockchain Data**: Ethereum network information gathered")
                if "chart_data_provider" in suggested_tools:
                    summary_parts.append("📈 **Chart Data**: Visualization data prepared")

                # Extract key data points from tool results.
                key_data = ""
                if tool_results:
                    for result in tool_results[:2]:  # Use first 2 tool results
                        if "USD" in result:
                            # Extract price info — first line mentioning a dollar amount.
                            lines = result.split('\n')
                            for line in lines:
                                if "USD" in line and "$" in line:
                                    key_data += f"\n{line.strip()}"
                                    break

                final_response = f"""## {query.title()}

{chr(10).join(summary_parts)}

**Key Findings**:{key_data}

The system successfully executed {len(suggested_tools)} data tools:
• {', '.join(suggested_tools)}

*Complete analysis available - AI processing optimized for speed.*"""

            logger.info("✅ Research successful with Ollama + tools")
            return {
                "success": True,
                "query": query,
                "result": final_response,
                "sources": [],
                "metadata": {
                    "llm_used": f"Ollama ({config.OLLAMA_MODEL})",
                    "tools_used": suggested_tools,
                    "timestamp": datetime.now().isoformat()
                }
            }

        except Exception as e:
            logger.error(f"Ollama tools research failed: {e}")
            raise e
459
+
460
    async def _research_with_gemini_tools(self, query: str, memory_context: Dict[str, Any] = None) -> Dict[str, Any]:
        """Research using Gemini with tools - Enhanced with memory.

        Same three-step flow as the Ollama variant (tool selection, tool
        execution, synthesis), but Gemini returns message objects, so the
        content is extracted via ``.content`` and additionally screened for
        leaked LangChain metadata. On any unexpected error the method falls
        back to the Ollama pipeline instead of raising.
        """
        try:
            # Step 1: Analyze query and suggest tools using Gemini.
            # Include memory context if available.
            context_info = ""
            if memory_context and memory_context.get("cached_context"):
                recent_tools = []
                for ctx in memory_context["cached_context"][:2]:  # Last 2 contexts
                    if "tools_used" in ctx:
                        recent_tools.extend(ctx["tools_used"])
                if recent_tools:
                    context_info = f"\n\nRecent tools used: {', '.join(set(recent_tools))}"

            tool_analysis_prompt = f"""Tools for: "{query}"{context_info}

cryptocompare_data: crypto prices
etherscan_data: Ethereum data
defillama_data: DeFi TVL
chart_data_provider: charts

List tool names:"""

            try:
                tool_response = await asyncio.wait_for(
                    self.llm.ainvoke(tool_analysis_prompt),
                    timeout=30  # 30 second timeout for Gemini tool analysis
                )

                logger.info(f"🧠 Gemini tool analysis response: {str(tool_response)[:100]}...")

                # Parse suggested tools: first try a strict comma-separated
                # list, keeping only exact known tool names.
                suggested_tools = [tool.strip() for tool in str(tool_response).split(',') if tool.strip()]
                suggested_tools = [tool for tool in suggested_tools if tool in {
                    'cryptocompare_data', 'defillama_data',
                    'etherscan_data', 'chart_data_provider'
                }]

                # If no valid tools found, extract from response content by
                # loose substring matching.
                if not suggested_tools:
                    response_text = str(tool_response).lower()
                    if 'cryptocompare' in response_text:
                        suggested_tools.append('cryptocompare_data')
                    if 'defillama' in response_text:
                        suggested_tools.append('defillama_data')
                    if 'etherscan' in response_text:
                        suggested_tools.append('etherscan_data')
                    if 'chart' in response_text or 'visualization' in response_text:
                        suggested_tools.append('chart_data_provider')

            except asyncio.TimeoutError:
                logger.warning("⏱️ Gemini tool analysis timed out, using fallback tool selection")
                # Same fallback logic as Ollama: keyword-driven selection.
                suggested_tools = []
                query_lower = query.lower()

                if any(word in query_lower for word in ['price', 'bitcoin', 'btc', 'ethereum', 'eth', 'crypto']):
                    suggested_tools.append('cryptocompare_data')
                if 'defi' in query_lower or 'tvl' in query_lower:
                    suggested_tools.append('defillama_data')
                if 'ethereum' in query_lower or 'gas' in query_lower:
                    suggested_tools.append('etherscan_data')
                if any(word in query_lower for word in ['chart', 'graph', 'visualization', 'trend']):
                    suggested_tools.append('chart_data_provider')

                if not suggested_tools:
                    suggested_tools = ['cryptocompare_data']

            logger.info(f"🛠️ Gemini suggested tools: {suggested_tools}")

            # Step 2: Execute tools (same logic as Ollama version).
            tool_results = []
            try:
                for tool_name in suggested_tools:
                    tool = next((t for t in self.tools if t.name == tool_name), None)
                    if tool:
                        try:
                            logger.info(f"🔧 Executing {tool_name}")

                            # Handle chart_data_provider with proper parameters.
                            if tool_name == "chart_data_provider":
                                chart_type = "price_chart"
                                symbol = "bitcoin"

                                if "defi" in query.lower() or "tvl" in query.lower():
                                    chart_type = "defi_tvl"
                                elif "market" in query.lower() or "overview" in query.lower():
                                    chart_type = "market_overview"
                                elif "gas" in query.lower():
                                    chart_type = "gas_tracker"

                                if "ethereum" in query.lower() or "eth" in query.lower():
                                    symbol = "ethereum"
                                elif "bitcoin" in query.lower() or "btc" in query.lower():
                                    symbol = "bitcoin"

                                result = await tool._arun(chart_type=chart_type, symbol=symbol)
                            else:
                                result = await tool._arun(query)

                            logger.info(f"📊 {tool_name} result preview: {str(result)[:200]}...")
                            tool_results.append(f"=== {tool_name} Results ===\n{result}\n")
                        except Exception as e:
                            logger.error(f"Tool {tool_name} failed: {e}")
                            tool_results.append(f"=== {tool_name} Error ===\nTool failed: {str(e)}\n")
                        finally:
                            # Cleanup tool session if available.
                            if hasattr(tool, 'cleanup'):
                                try:
                                    await tool.cleanup()
                                except Exception:
                                    pass  # Ignore cleanup errors
            finally:
                # Ensure all tools are cleaned up on every exit path.
                for tool in self.tools:
                    if hasattr(tool, 'cleanup'):
                        try:
                            await tool.cleanup()
                        except Exception:
                            pass

            # Step 3: Generate final response with Gemini.
            context = "\n".join(tool_results) if tool_results else "No tool data available - provide general information."

            final_prompt = ai_safety.create_safe_prompt(query, context)

            try:
                final_response = await asyncio.wait_for(
                    self.llm.ainvoke(final_prompt),
                    timeout=60  # 60 second timeout for complex analysis
                )
                logger.info(f"🎯 Gemini final response preview: {str(final_response)[:300]}...")

                # Extract content from Gemini response object (AIMessage-like);
                # fall back to str() for unexpected return types.
                if hasattr(final_response, 'content'):
                    response_content = final_response.content
                    logger.info(f"✅ Extracted clean content: {response_content[:200]}...")
                else:
                    response_content = str(final_response)
                    logger.warning(f"⚠️ Fallback to str() conversion: {response_content[:200]}...")

                # AI Safety Check: Validate response.
                clean_response, response_safe, response_reason = ai_safety.validate_gemini_response(response_content)
                if not response_safe:
                    ai_safety.log_safety_event("blocked_gemini_response", {
                        "reason": response_reason,
                        "query": query[:100],
                        "timestamp": datetime.now().isoformat()
                    })
                    clean_response = f"## Cryptocurrency Analysis\n\nBased on the available data:\n\n{context[:1000]}\n\n*Response filtered for safety*"

                logger.info(f"🔒 Final clean response: {clean_response[:200]}...")
                final_response = clean_response

            except asyncio.TimeoutError:
                logger.warning("⏱️ Gemini final response timed out (60s), using enhanced tool summary")

                # Create enhanced summary from tools.
                summary_parts = []
                if "cryptocompare_data" in suggested_tools:
                    summary_parts.append("📊 **Market Data**: Real-time cryptocurrency prices")
                if "defillama_data" in suggested_tools:
                    summary_parts.append("🏛️ **DeFi Analytics**: Protocol TVL and performance metrics")
                if "etherscan_data" in suggested_tools:
                    summary_parts.append("⛓️ **On-Chain Data**: Ethereum blockchain insights")
                if "chart_data_provider" in suggested_tools:
                    summary_parts.append("📈 **Visualizations**: Chart data prepared")

                final_response = f"""## Web3 Research Analysis

{chr(10).join(summary_parts)}

**Data Sources Processed**: {len(suggested_tools)} tools executed successfully

{context[:800] if context else 'Tool data processing completed'}

*Analysis optimized for real-time delivery*"""

            logger.info("✅ Research successful with Gemini + tools")

            # Final safety check: ensure we're not returning raw LangChain objects.
            if isinstance(final_response, str):
                if "additional_kwargs" in final_response or "response_metadata" in final_response:
                    logger.error("🚨 CRITICAL: Raw LangChain metadata detected in final response!")
                    final_response = "Response contains technical metadata and has been filtered for safety."

            return {
                "success": True,
                "query": query,
                "result": final_response,
                "sources": [],
                "metadata": {
                    # NOTE(review): the fallback label 'gemini-1.5-flash' is stale —
                    # _init_gemini configures gemini-2.0-flash-lite; confirm and update.
                    "llm_used": f"Gemini ({self.llm.model_name if hasattr(self.llm, 'model_name') else 'gemini-1.5-flash'})",
                    "tools_used": suggested_tools,
                    "timestamp": datetime.now().isoformat()
                }
            }

        except Exception as e:
            logger.error(f"Gemini tools research failed: {e}")
            # Fallback to Ollama if Gemini fails.
            # NOTE(review): memory_context is not forwarded here — confirm whether
            # the fallback should also receive it.
            logger.info("🔄 Falling back to Ollama due to Gemini error")
            return await self._research_with_ollama_tools(query)
663
+
664
+ def _extract_sources(self, response: str) -> List[str]:
665
+ """Extract sources from response"""
666
+ # Simple source extraction - can be enhanced
667
  sources = []
668
+ if "CoinGecko" in response or "coingecko" in response.lower():
669
+ sources.append("CoinGecko")
670
+ if "DeFiLlama" in response or "defillama" in response.lower():
671
+ sources.append("DeFiLlama")
672
+ if "Etherscan" in response or "etherscan" in response.lower():
673
+ sources.append("Etherscan")
674
+ if "CryptoCompare" in response or "cryptocompare" in response.lower():
675
+ sources.append("CryptoCompare")
676
  return sources
677
+
678
    def get_conversation_history(self) -> Dict[str, Any]:
        """Get conversation history from memory.

        Delegates to MemoryManager.get_relevant_context with an empty query.
        NOTE(review): presumably an empty query yields the general/most-recent
        context rather than a filtered subset — confirm against MemoryManager.
        """
        return self.memory_manager.get_relevant_context("")
681
+
682
    def clear_conversation_memory(self):
        """Clear all stored conversation memory via the MemoryManager."""
        self.memory_manager.clear_memory()
        logger.info("🧠 Conversation memory cleared")
686
+
687
    def get_memory_stats(self) -> Dict[str, Any]:
        """Get memory usage statistics.

        Returns a dict with the number of stored interactions, the number of
        cached contexts, and a flag indicating memory is active.
        """
        history = self.memory_manager.memory.load_memory_variables({})
        return {
            # Buffer memory stores alternating human/AI messages, so one logical
            # interaction corresponds to two chat_history entries.
            "total_interactions": len(history.get("chat_history", [])) // 2,  # Each interaction has input+output
            "cached_contexts": len(self.memory_manager.context_cache),
            "memory_enabled": True
        }
src/tools/base_tool.py CHANGED
@@ -1,7 +1,7 @@
1
  from abc import ABC, abstractmethod
2
- from typing import Dict, Any, Optional
3
  from langchain.tools import BaseTool
4
- from pydantic import BaseModel, Field, PrivateAttr
5
  import asyncio
6
  import aiohttp
7
  import hashlib
@@ -14,7 +14,19 @@ logger = get_logger(__name__)
14
 
15
  class Web3ToolInput(BaseModel):
16
  query: str = Field(description="Search query or parameter")
17
- filters: Optional[Dict[str, Any]] = Field(default=None, description="Additional filters")
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
  class BaseWeb3Tool(BaseTool, ABC):
20
  name: str = "base_web3_tool"
@@ -72,11 +84,11 @@ class BaseWeb3Tool(BaseTool, ABC):
72
  key_data = f"{url}:{json.dumps(params, sort_keys=True)}"
73
  return hashlib.md5(key_data.encode()).hexdigest()[:16]
74
 
75
- def _run(self, query: str, filters: Optional[Dict[str, Any]] = None) -> str:
76
  return asyncio.run(self._arun(query, filters))
77
-
78
  @abstractmethod
79
- async def _arun(self, query: str, filters: Optional[Dict[str, Any]] = None) -> str:
80
  pass
81
 
82
  async def cleanup(self):
 
1
  from abc import ABC, abstractmethod
2
+ from typing import Dict, Any, Optional, Union
3
  from langchain.tools import BaseTool
4
+ from pydantic import BaseModel, Field, PrivateAttr, field_validator
5
  import asyncio
6
  import aiohttp
7
  import hashlib
 
14
 
15
class Web3ToolInput(BaseModel):
    """Input schema shared by all Web3 tools: a query plus optional filters."""

    query: str = Field(description="Search query or parameter")
    filters: Optional[Union[Dict[str, Any], str]] = Field(default=None, description="Additional filters (dict) or filter type (string)")

    @field_validator('filters')
    @classmethod
    def validate_filters(cls, v):
        """Normalize filters: dicts pass through, strings become a typed dict,
        anything else (including None) collapses to None."""
        if isinstance(v, dict):
            return v
        if isinstance(v, str):
            # A bare string is shorthand for a filter type.
            return {"type": v}
        return None
30
 
31
  class BaseWeb3Tool(BaseTool, ABC):
32
  name: str = "base_web3_tool"
 
84
  key_data = f"{url}:{json.dumps(params, sort_keys=True)}"
85
  return hashlib.md5(key_data.encode()).hexdigest()[:16]
86
 
87
+ def _run(self, query: str, filters: Optional[Dict[str, Any]] = None, **kwargs) -> str:
88
  return asyncio.run(self._arun(query, filters))
89
+
90
  @abstractmethod
91
+ async def _arun(self, query: str, filters: Optional[Dict[str, Any]] = None, **kwargs) -> str:
92
  pass
93
 
94
  async def cleanup(self):
src/tools/chart_creator_tool.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.tools import BaseTool
2
+ from pydantic import BaseModel, Field
3
+ from typing import Dict, Any, List, Optional
4
+ import json
5
+ import asyncio
6
+ from datetime import datetime
7
+
8
+ from src.visualizations import CryptoVisualizations
9
+ from src.tools.coingecko_tool import CoinGeckoTool
10
+ from src.tools.defillama_tool import DeFiLlamaTool
11
+ from src.tools.etherscan_tool import EtherscanTool
12
+ from src.utils.logger import get_logger
13
+
14
+ logger = get_logger(__name__)
15
+
16
class ChartCreatorInput(BaseModel):
    """Schema for chart-creation requests; only essential parameters are accepted."""

    # Required: which visualization to build.
    chart_type: str = Field(
        description="Chart type: price_chart, market_overview, defi_tvl, portfolio_pie, gas_tracker"
    )
    # Optional asset selector used by price/market charts.
    symbol: Optional[str] = Field(
        description="Asset symbol (e.g., bitcoin, ethereum) for price/market charts",
        default=None
    )
    # Lookback window; defaults to one month.
    timeframe: Optional[str] = Field(
        description="Time range: 1d, 7d, 30d, 90d, 365d",
        default="30d"
    )
    # Only used by DeFi TVL charts.
    protocols: Optional[List[str]] = Field(
        description="Protocol names for DeFi TVL charts (e.g., ['uniswap', 'aave'])",
        default=None
    )
    # Only used by the gas tracker.
    network: Optional[str] = Field(
        description="Blockchain network for gas tracker (ethereum, polygon, etc.)",
        default="ethereum"
    )
+
38
class ChartCreatorTool(BaseTool):
    """
    Intelligent Chart Creator Tool

    This tool can create various types of cryptocurrency and DeFi charts by:
    1. Accepting structured chart parameters (never raw user queries)
    2. Fetching data from the source appropriate for the chart type
    3. Rendering a Plotly figure and returning it as embeddable HTML
    """

    name: str = "chart_creator"
    description: str = """Create cryptocurrency and DeFi charts with specific parameters only.

    IMPORTANT: Only pass essential chart parameters - do not send full user queries.

    Chart types and required parameters:
    - price_chart: symbol (e.g., "bitcoin"), timeframe (e.g., "30d")
    - market_overview: symbol (optional), timeframe (default "30d")
    - defi_tvl: protocols (list of protocol names), timeframe (optional)
    - portfolio_pie: No parameters needed (uses default allocation)
    - gas_tracker: network (e.g., "ethereum"), timeframe (optional)

    Examples of CORRECT usage:
    - price_chart for Bitcoin: symbol="bitcoin", timeframe="30d"
    - DeFi TVL chart: protocols=["uniswap", "aave"], timeframe="7d"
    - Gas tracker: network="ethereum", timeframe="1d"
    """

    # Declared as plain fields so pydantic's BaseTool model permits the
    # assignments made in __init__.
    viz: Any = None
    coingecko: Any = None
    defillama: Any = None
    etherscan: Any = None

    args_schema: type[ChartCreatorInput] = ChartCreatorInput

    def __init__(self):
        super().__init__()
        self.viz = CryptoVisualizations()
        self.coingecko = CoinGeckoTool()
        self.defillama = DeFiLlamaTool()
        self.etherscan = EtherscanTool()

    def _run(self, chart_type: str, symbol: str = None, timeframe: str = "30d",
             protocols: List[str] = None, network: str = "ethereum") -> str:
        """Synchronous execution (not used in async context)."""
        return asyncio.run(self._arun(chart_type, symbol, timeframe, protocols, network))

    async def _arun(self, chart_type: str, symbol: str = None, timeframe: str = "30d",
                    protocols: List[str] = None, network: str = "ethereum") -> str:
        """Create a chart and return a JSON envelope with status + chart HTML.

        Returns a JSON string with keys: status, message, chart_html and, on
        success, data_source; on failure an "alternative" suggestion instead.
        """
        try:
            logger.info(f"Creating {chart_type} chart for {symbol or 'general'} with timeframe {timeframe}")

            # Build parameters from clean inputs
            parameters = {
                "symbol": symbol,
                "timeframe": timeframe,
                "protocols": protocols,
                "network": network,
                "days": self._parse_timeframe(timeframe)
            }

            # Determine data source based on chart type
            data_source = self._get_data_source(chart_type)

            # Fetch data based on source and chart type
            data = await self._fetch_chart_data(chart_type, parameters, data_source)

            if not data:
                return json.dumps({
                    "status": "error",
                    "message": f"Unable to fetch data for {chart_type} from {data_source}",
                    "alternative": f"Try requesting textual analysis instead, or use different parameters",
                    "chart_html": None
                })

            # Create the appropriate chart
            chart_html = await self._create_chart(chart_type, data, parameters)

            if chart_html:
                logger.info(f"Successfully created {chart_type} chart")
                return json.dumps({
                    "status": "success",
                    "message": f"Successfully created {chart_type} chart",
                    "chart_html": chart_html,
                    "data_source": data_source
                })
            else:
                return json.dumps({
                    "status": "error",
                    "message": f"Chart creation failed for {chart_type}",
                    "alternative": f"Data was retrieved but visualization failed. Providing textual analysis instead.",
                    "chart_html": None
                })

        except Exception as e:
            logger.error(f"Chart creation error: {e}")
            return json.dumps({
                "status": "error",
                "message": f"Chart creation failed: {str(e)}",
                "alternative": "Please try again with different parameters or request textual analysis",
                "chart_html": None
            })

    async def _fetch_chart_data(self, chart_type: str, parameters: Dict[str, Any], data_source: str) -> Optional[Dict[str, Any]]:
        """Fetch data from the appropriate source based on chart type."""
        try:
            if data_source == "coingecko":
                return await self._fetch_coingecko_data(chart_type, parameters)
            elif data_source == "defillama":
                return await self._fetch_defillama_data(chart_type, parameters)
            elif data_source == "etherscan":
                return await self._fetch_etherscan_data(chart_type, parameters)
            elif data_source == "custom":
                # FIX: "custom" charts (portfolio_pie) need no external data.
                # Previously this fell through to the unknown-source branch,
                # returned None, and portfolio charts always errored out.
                return parameters.get("portfolio") or {"BTC": 40, "ETH": 30, "ADA": 20, "DOT": 10}
            else:
                logger.warning(f"Unknown data source: {data_source}")
                return None

        except Exception as e:
            logger.error(f"Data fetch error: {e}")
            return None

    async def _fetch_coingecko_data(self, chart_type: str, parameters: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Fetch data from CoinGecko API.

        NOTE(review): currently returns deterministic mock data rather than
        calling self.coingecko - confirm whether live data is intended here.
        """
        try:
            if chart_type == "price_chart":
                symbol = parameters.get("symbol", "bitcoin")
                days = parameters.get("days", 30)

                # Create mock price data (one point per day, mild variation)
                base_timestamp = 1704067200000  # Jan 1, 2024
                mock_data = {
                    "prices": [[base_timestamp + i * 86400000, 35000 + i * 100 + (i % 7) * 500] for i in range(days)],
                    "total_volumes": [[base_timestamp + i * 86400000, 1000000 + i * 10000 + (i % 5) * 50000] for i in range(days)],
                    "symbol": symbol,
                    "days": days
                }
                return mock_data

            elif chart_type == "market_overview":
                # Create mock market data
                mock_data = {
                    "coins": [
                        {"name": "Bitcoin", "symbol": "BTC", "current_price": 35000, "market_cap_rank": 1, "price_change_percentage_24h": 2.5},
                        {"name": "Ethereum", "symbol": "ETH", "current_price": 1800, "market_cap_rank": 2, "price_change_percentage_24h": -1.2},
                        {"name": "Cardano", "symbol": "ADA", "current_price": 0.25, "market_cap_rank": 3, "price_change_percentage_24h": 3.1}
                    ]
                }
                return mock_data

        except Exception as e:
            logger.error(f"CoinGecko data fetch error: {e}")

        return None

    async def _fetch_defillama_data(self, chart_type: str, parameters: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Fetch data from DeFiLlama API (currently mock TVL figures)."""
        try:
            if chart_type == "defi_tvl":
                protocols = parameters.get("protocols", ["uniswap", "aave", "compound"])
                # Create mock TVL data
                mock_data = {
                    "protocols": [
                        {"name": "Uniswap", "tvl": 3500000000, "change_24h": 2.1},
                        {"name": "Aave", "tvl": 5200000000, "change_24h": -0.8},
                        {"name": "Compound", "tvl": 1800000000, "change_24h": 1.5}
                    ]
                }
                return mock_data

        except Exception as e:
            logger.error(f"DeFiLlama data fetch error: {e}")

        return None

    async def _fetch_etherscan_data(self, chart_type: str, parameters: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Fetch data from Etherscan API (currently mock gas prices in gwei)."""
        try:
            if chart_type == "gas_tracker":
                # Create mock gas data
                mock_data = {
                    "gas_prices": {
                        "safe": 15,
                        "standard": 20,
                        "fast": 35,
                        "instant": 50
                    },
                    "network": "ethereum"
                }
                return mock_data

        except Exception as e:
            logger.error(f"Etherscan data fetch error: {e}")

        return None

    async def _create_chart(self, chart_type: str, data: Dict[str, Any], parameters: Dict[str, Any]) -> Optional[str]:
        """Create chart using the visualization module; return HTML or None."""
        try:
            fig = None

            if chart_type == "price_chart":
                symbol = parameters.get("symbol", "BTC")
                fig = self.viz.create_price_chart(data, symbol)

            elif chart_type == "market_overview":
                # FIX: the fetcher returns {"coins": [...]}; the old code only
                # looked for a "data" key and always charted an empty list.
                market_data = []
                if isinstance(data, dict):
                    market_data = data.get("coins") or data.get("data") or []
                elif isinstance(data, list):
                    market_data = data
                fig = self.viz.create_market_overview(market_data)

            elif chart_type == "defi_tvl":
                # FIX: the fetcher returns {"protocols": [...]}; unwrap that
                # list instead of wrapping the whole dict in a one-item list.
                tvl_data = []
                if isinstance(data, dict):
                    tvl_data = data.get("protocols", [data])
                elif isinstance(data, list):
                    tvl_data = data
                fig = self.viz.create_defi_tvl_chart(tvl_data)

            elif chart_type == "portfolio_pie":
                portfolio_data = parameters.get("portfolio", {})
                if not portfolio_data and isinstance(data, dict):
                    portfolio_data = data
                fig = self.viz.create_portfolio_pie_chart(portfolio_data)

            elif chart_type == "gas_tracker":
                fig = self.viz.create_gas_tracker(data)

            if fig:
                # Convert to HTML; the timestamped div id keeps multiple charts
                # on one page from colliding.
                chart_html = fig.to_html(
                    include_plotlyjs='cdn',
                    div_id=f"chart_{chart_type}_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
                    config={'displayModeBar': True, 'responsive': True}
                )

                # Store chart for later retrieval (you could save to database/cache here)
                return chart_html

            return None

        except Exception as e:
            logger.error(f"Chart creation error: {e}")
            return None

    def get_chart_suggestions(self, query: str) -> List[Dict[str, Any]]:
        """Generate up to three chart suggestions based on keywords in the query."""
        suggestions = []

        query_lower = query.lower()

        # Price-related queries
        if any(word in query_lower for word in ["price", "chart", "trend", "bitcoin", "ethereum", "crypto"]):
            suggestions.append({
                "chart_type": "price_chart",
                "description": "Price and volume chart with historical data",
                "parameters": {"symbol": "bitcoin", "days": 30},
                "data_source": "coingecko"
            })

        # Market overview queries
        if any(word in query_lower for word in ["market", "overview", "top", "comparison", "ranking"]):
            suggestions.append({
                "chart_type": "market_overview",
                "description": "Market cap and performance overview of top cryptocurrencies",
                "parameters": {"limit": 20},
                "data_source": "coingecko"
            })

        # DeFi queries
        if any(word in query_lower for word in ["defi", "tvl", "protocol", "uniswap", "aave", "compound"]):
            suggestions.append({
                "chart_type": "defi_tvl",
                "description": "DeFi protocol Total Value Locked comparison",
                "parameters": {"protocols": ["uniswap", "aave", "compound"]},
                "data_source": "defillama"
            })

        # Gas fee queries
        if any(word in query_lower for word in ["gas", "fee", "ethereum", "network", "transaction"]):
            suggestions.append({
                "chart_type": "gas_tracker",
                "description": "Ethereum gas fee tracker",
                "parameters": {"network": "ethereum"},
                "data_source": "etherscan"
            })

        # Portfolio queries
        if any(word in query_lower for word in ["portfolio", "allocation", "distribution", "holdings"]):
            suggestions.append({
                "chart_type": "portfolio_pie",
                "description": "Portfolio allocation pie chart",
                "parameters": {"portfolio": {"BTC": 40, "ETH": 30, "ADA": 20, "DOT": 10}},
                "data_source": "custom"
            })

        return suggestions[:3]  # Return top 3 suggestions

    def _parse_timeframe(self, timeframe: str) -> int:
        """Convert a timeframe string (e.g. "30d", "1y") to a day count."""
        timeframe_map = {
            "1d": 1, "7d": 7, "30d": 30, "90d": 90, "365d": 365, "1y": 365
        }
        return timeframe_map.get(timeframe, 30)

    def _get_data_source(self, chart_type: str) -> str:
        """Determine the appropriate data source for a chart type."""
        source_map = {
            "price_chart": "coingecko",
            "market_overview": "coingecko",
            "defi_tvl": "defillama",
            "portfolio_pie": "custom",
            "gas_tracker": "etherscan"
        }
        return source_map.get(chart_type, "coingecko")
src/tools/chart_data_tool.py ADDED
@@ -0,0 +1,390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.tools import BaseTool
2
+ from pydantic import BaseModel, Field
3
+ from typing import Dict, Any, List, Optional
4
+ import json
5
+ import asyncio
6
+
7
+ from src.utils.logger import get_logger
8
+
9
+ logger = get_logger(__name__)
10
+
11
class ChartDataInput(BaseModel):
    """Schema for chart data requests accepted by ChartDataTool."""

    # Which dataset to produce.
    chart_type: str = Field(description="Chart type: price_chart, market_overview, defi_tvl, portfolio_pie, gas_tracker")
    # Asset selector for price charts.
    symbol: Optional[str] = Field(description="Asset symbol (e.g., bitcoin, ethereum)", default=None)
    # Lookback window; defaults to one month.
    timeframe: Optional[str] = Field(description="Time range: 1d, 7d, 30d, 90d, 365d", default="30d")
    # Only used for DeFi TVL requests.
    protocols: Optional[List[str]] = Field(description="DeFi protocol names", default=None)
    # Only used for gas-tracker requests.
    network: Optional[str] = Field(description="Blockchain network", default="ethereum")
18
+
19
class ChartDataTool(BaseTool):
    """
    Chart Data Provider Tool

    This tool provides structured data that can be used to create charts.
    Instead of returning HTML, it returns clean JSON data for visualization.
    """

    name: str = "chart_data_provider"
    description: str = """Provides structured data for creating cryptocurrency charts.

    Returns JSON data in this format:
    {{
        "chart_type": "price_chart|market_overview|defi_tvl|portfolio_pie|gas_tracker",
        "data": {{...}},
        "config": {{...}}
    }}

    Chart types:
    - price_chart: Bitcoin/crypto price and volume data
    - market_overview: Top cryptocurrencies market data
    - defi_tvl: DeFi protocol TVL comparison
    - portfolio_pie: Portfolio allocation breakdown
    - gas_tracker: Gas fees across networks
    """

    args_schema: type[ChartDataInput] = ChartDataInput

    def _run(self, chart_type: str, symbol: str = None, timeframe: str = "30d",
             protocols: List[str] = None, network: str = "ethereum") -> str:
        """Synchronous execution."""
        return asyncio.run(self._arun(chart_type, symbol, timeframe, protocols, network))

    async def _arun(self, chart_type: str, symbol: str = None, timeframe: str = "30d",
                    protocols: List[str] = None, network: str = "ethereum") -> str:
        """Dispatch to the per-chart data provider; always returns a JSON string.

        Cleanup runs in a finally block so sessions opened by helper tools are
        released even when a provider raises.
        """
        try:
            logger.info(f"Providing {chart_type} data for {symbol or 'general'}")

            if chart_type == "price_chart":
                if not symbol:
                    symbol = "bitcoin"  # Default symbol
                # Reuse the shared parser (also accepts "1y") instead of a
                # second inline mapping that could drift out of sync.
                days = self._parse_timeframe(timeframe)
                result = await self._get_price_chart_data(symbol, days)
            elif chart_type == "market_overview":
                result = await self._get_market_overview_data()
            elif chart_type == "defi_tvl":
                result = await self._get_defi_tvl_data(protocols)
            elif chart_type == "portfolio_pie":
                result = await self._get_portfolio_data()
            elif chart_type == "gas_tracker":
                # FIX: previously called self._get_gas_tracker_data, which does
                # not exist (the method is named _get_gas_data), so every
                # gas_tracker request raised AttributeError.
                result = await self._get_gas_data(network)
            else:
                result = json.dumps({
                    "chart_type": "error",
                    "error": f"Unknown chart type: {chart_type}",
                    "available_types": ["price_chart", "market_overview", "defi_tvl", "portfolio_pie", "gas_tracker"]
                })

            return result

        except Exception as e:
            logger.error(f"Chart data generation failed: {e}")
            return json.dumps({
                "chart_type": "error",
                "error": str(e),
                "message": "Failed to generate chart data"
            })
        finally:
            # Ensure session cleanup
            await self.cleanup()

    async def _get_price_chart_data(self, symbol: str, days: int) -> str:
        """Get price chart data, falling back to mock data on API failure."""
        cryptocompare_tool = None
        try:
            # First try to get real data from CryptoCompare (we have this API key)
            from src.tools.cryptocompare_tool import CryptoCompareTool

            cryptocompare_tool = CryptoCompareTool()

            # Map common symbols to CryptoCompare format.
            # FIX: "eth" previously mapped to "ethereum" instead of "ETH",
            # breaking CryptoCompare lookups for that alias.
            symbol_map = {
                "btc": "BTC", "bitcoin": "BTC",
                "eth": "ETH", "ethereum": "ETH",
                "sol": "SOL", "solana": "SOL",
                "ada": "ADA", "cardano": "ADA",
                "bnb": "BNB", "binance": "BNB",
                "matic": "MATIC", "polygon": "MATIC",
                "avax": "AVAX", "avalanche": "AVAX",
                "dot": "DOT", "polkadot": "DOT",
                "link": "LINK", "chainlink": "LINK",
                "uni": "UNI", "uniswap": "UNI"
            }

            crypto_symbol = symbol_map.get(symbol.lower(), symbol.upper())

            try:
                # Use CryptoCompare for price data
                query = f"{crypto_symbol} price historical {days} days"
                data_result = await cryptocompare_tool._arun(query, {"type": "price_history", "days": days})

                # The tool signals failure with emoji-prefixed strings.
                if data_result and not data_result.startswith("❌") and not data_result.startswith("⚠️"):
                    return json.dumps({
                        "chart_type": "price_chart",
                        "data": {
                            "source": "cryptocompare",
                            "raw_data": data_result,
                            "symbol": crypto_symbol,
                            "name": symbol.title()
                        },
                        "config": {
                            "title": f"{symbol.title()} Price Analysis ({days} days)",
                            "timeframe": f"{days}d",
                            "currency": "USD"
                        }
                    })
                else:
                    raise Exception("No valid price data from CryptoCompare")

            except Exception as api_error:
                logger.error(f"CryptoCompare price data failed: {api_error}")
                # Fallback to mock data on any API error
                logger.info(f"Using fallback mock data for {symbol}")
                return await self._get_mock_price_data(symbol, days)

        except Exception as e:
            logger.error(f"Price chart data generation failed: {e}")
            # Final fallback to mock data
            return await self._get_mock_price_data(symbol, days)
        finally:
            # Cleanup CryptoCompare tool session
            if cryptocompare_tool and hasattr(cryptocompare_tool, 'cleanup'):
                try:
                    await cryptocompare_tool.cleanup()
                except Exception:
                    pass  # Ignore cleanup errors

    async def _get_mock_price_data(self, symbol: str, days: int) -> str:
        """Fallback mock price data (random walk around a base price)."""
        import time
        import random

        base_price = 35000 if symbol.lower() == "bitcoin" else 1800 if symbol.lower() == "ethereum" else 100
        base_timestamp = int(time.time() * 1000) - (days * 24 * 60 * 60 * 1000)

        price_data = []
        volume_data = []

        for i in range(days):
            timestamp = base_timestamp + (i * 24 * 60 * 60 * 1000)
            price_change = random.uniform(-0.05, 0.05)
            price = base_price * (1 + price_change * i / days)
            price += random.uniform(-price*0.02, price*0.02)
            volume = random.uniform(1000000000, 5000000000)

            price_data.append([timestamp, round(price, 2)])
            volume_data.append([timestamp, int(volume)])

        return json.dumps({
            "chart_type": "price_chart",
            "data": {
                "prices": price_data,
                "total_volumes": volume_data,
                "symbol": symbol.upper(),
                "name": symbol.title()
            },
            "config": {
                "title": f"{symbol.title()} Price Analysis ({days} days)",
                "timeframe": f"{days}d",
                "currency": "USD"
            }
        })

    async def _get_market_overview_data(self) -> str:
        """Get market overview data using CryptoCompare API."""
        cryptocompare_tool = None
        try:
            from src.tools.cryptocompare_tool import CryptoCompareTool

            cryptocompare_tool = CryptoCompareTool()

            # Get market overview using CryptoCompare
            query = "top cryptocurrencies market cap overview"
            data_result = await cryptocompare_tool._arun(query, {"type": "market_overview"})

            if data_result and not data_result.startswith("❌") and not data_result.startswith("⚠️"):
                return json.dumps({
                    "chart_type": "market_overview",
                    "data": {
                        "source": "cryptocompare",
                        "raw_data": data_result
                    },
                    "config": {
                        "title": "Top Cryptocurrencies Market Overview",
                        "currency": "USD"
                    }
                })
            else:
                raise Exception("No valid market data from CryptoCompare")

        except Exception as e:
            logger.error(f"Market overview API failed: {e}")
            return await self._get_mock_market_data()
        finally:
            # Cleanup CryptoCompare tool session
            if cryptocompare_tool and hasattr(cryptocompare_tool, 'cleanup'):
                try:
                    await cryptocompare_tool.cleanup()
                except Exception:
                    pass  # Ignore cleanup errors

    async def _get_mock_market_data(self) -> str:
        """Fallback mock market data."""
        return json.dumps({
            "chart_type": "market_overview",
            "data": {
                "coins": [
                    {"name": "Bitcoin", "symbol": "BTC", "current_price": 35000, "market_cap_rank": 1, "price_change_percentage_24h": 2.5},
                    {"name": "Ethereum", "symbol": "ETH", "current_price": 1800, "market_cap_rank": 2, "price_change_percentage_24h": -1.2},
                    {"name": "Cardano", "symbol": "ADA", "current_price": 0.25, "market_cap_rank": 3, "price_change_percentage_24h": 3.1},
                    {"name": "Solana", "symbol": "SOL", "current_price": 22.5, "market_cap_rank": 4, "price_change_percentage_24h": -2.8},
                    {"name": "Polygon", "symbol": "MATIC", "current_price": 0.52, "market_cap_rank": 5, "price_change_percentage_24h": 1.9}
                ]
            },
            "config": {
                "title": "Top Cryptocurrencies Market Overview",
                "currency": "USD"
            }
        })

    async def _get_defi_tvl_data(self, protocols: List[str]) -> str:
        """Get real DeFi TVL data from the DeFiLlama API."""
        defillama = None
        try:
            from src.tools.defillama_tool import DeFiLlamaTool

            defillama = DeFiLlamaTool()

            # Get protocols data
            data = await defillama.make_request(f"{defillama._base_url}/protocols")

            if not data:
                logger.warning("DeFiLlama API failed, using fallback")
                return await self._get_mock_defi_data(protocols)

            # Filter for requested protocols or top protocols
            if protocols:
                filtered_protocols = []
                for protocol_name in protocols:
                    for protocol in data:
                        if protocol_name.lower() in protocol.get("name", "").lower():
                            filtered_protocols.append(protocol)
                            break
                protocols_data = filtered_protocols[:8]  # Limit to 8
            else:
                # Get top protocols by TVL (filter out None values)
                valid_protocols = [p for p in data if p.get("tvl") is not None and p.get("tvl", 0) > 0]
                protocols_data = sorted(valid_protocols, key=lambda x: x.get("tvl", 0), reverse=True)[:8]

            if not protocols_data:
                return await self._get_mock_defi_data(protocols)

            # Format TVL data
            tvl_data = []
            for protocol in protocols_data:
                tvl_data.append({
                    "name": protocol.get("name", "Unknown"),
                    "tvl": protocol.get("tvl", 0),
                    "change_1d": protocol.get("change_1d", 0),
                    "chain": protocol.get("chain", "Multi-chain"),
                    "category": protocol.get("category", "DeFi")
                })

            return json.dumps({
                "chart_type": "defi_tvl",
                "data": {"protocols": tvl_data},
                "config": {
                    "title": "DeFi Protocols by Total Value Locked",
                    "currency": "USD"
                }
            })

        except Exception as e:
            logger.error(f"DeFi TVL API failed: {e}")
            return await self._get_mock_defi_data(protocols)
        finally:
            # FIX: release the DeFiLlama session like the other helpers do;
            # previously this method leaked its aiohttp session.
            if defillama and hasattr(defillama, 'cleanup'):
                try:
                    await defillama.cleanup()
                except Exception:
                    pass  # Ignore cleanup errors

    async def _get_mock_defi_data(self, protocols: List[str]) -> str:
        """Fallback mock DeFi data with randomized TVL figures."""
        import random

        protocol_names = protocols or ["Uniswap", "Aave", "Compound", "Curve", "MakerDAO"]
        tvl_data = []

        for protocol in protocol_names[:5]:
            tvl = random.uniform(500000000, 5000000000)
            change = random.uniform(-10, 15)
            tvl_data.append({
                "name": protocol,
                "tvl": tvl,
                "change_1d": change,
                "chain": "Ethereum",
                "category": "DeFi"
            })

        return json.dumps({
            "chart_type": "defi_tvl",
            "data": {"protocols": tvl_data},
            "config": {
                "title": "DeFi Protocols by Total Value Locked",
                "currency": "USD"
            }
        })

    async def _get_portfolio_data(self) -> str:
        """Get (sample) portfolio allocation data."""
        return json.dumps({
            "chart_type": "portfolio_pie",
            "data": {
                "allocations": [
                    {"name": "Bitcoin", "symbol": "BTC", "value": 40, "color": "#f7931a"},
                    {"name": "Ethereum", "symbol": "ETH", "value": 30, "color": "#627eea"},
                    {"name": "Cardano", "symbol": "ADA", "value": 15, "color": "#0033ad"},
                    {"name": "Solana", "symbol": "SOL", "value": 10, "color": "#9945ff"},
                    {"name": "Other", "symbol": "OTHER", "value": 5, "color": "#666666"}
                ]
            },
            "config": {
                "title": "Sample Portfolio Allocation",
                "currency": "Percentage"
            }
        })

    async def _get_gas_data(self, network: str) -> str:
        """Get (mock) hourly gas fee data for the last 24 hours."""
        import random
        import time

        # Generate 24 hours of gas data
        gas_data = []
        base_timestamp = int(time.time() * 1000) - (24 * 60 * 60 * 1000)

        for i in range(24):
            timestamp = base_timestamp + (i * 60 * 60 * 1000)
            gas_price = random.uniform(20, 100) if network == "ethereum" else random.uniform(1, 10)
            gas_data.append([timestamp, round(gas_price, 2)])

        return json.dumps({
            "chart_type": "gas_tracker",
            "data": {
                "gas_prices": gas_data,
                "network": network.title()
            },
            "config": {
                "title": f"{network.title()} Gas Fee Tracker (24h)",
                "unit": "Gwei"
            }
        })

    def _parse_timeframe(self, timeframe: str) -> int:
        """Convert a timeframe string (e.g. "30d", "1y") to a day count."""
        timeframe_map = {
            "1d": 1, "7d": 7, "30d": 30, "90d": 90, "365d": 365, "1y": 365
        }
        return timeframe_map.get(timeframe, 30)

    async def cleanup(self):
        """Cleanup method for session management."""
        # ChartDataTool creates temporary tools that may have sessions.
        # Since we don't maintain persistent references, sessions should
        # auto-close; force a GC pass to encourage prompt cleanup.
        import gc
        gc.collect()
src/tools/coingecko_tool.py CHANGED
@@ -23,7 +23,7 @@ class CoinGeckoTool(BaseWeb3Tool):
23
  def __init__(self):
24
  super().__init__()
25
 
26
- async def _arun(self, query: str, filters: Optional[Dict[str, Any]] = None) -> str:
27
  filters = filters or {}
28
  try:
29
  # Check cache first
 
23
  def __init__(self):
24
  super().__init__()
25
 
26
+ async def _arun(self, query: str, filters: Optional[Dict[str, Any]] = None, **kwargs) -> str:
27
  filters = filters or {}
28
  try:
29
  # Check cache first
src/tools/cryptocompare_tool.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Any, Optional
2
+ from pydantic import BaseModel, PrivateAttr
3
+ from src.tools.base_tool import BaseWeb3Tool, Web3ToolInput
4
+ from src.utils.config import config
5
+ from src.utils.logger import get_logger
6
+
7
+ logger = get_logger(__name__)
8
+
9
class CryptoCompareTool(BaseWeb3Tool):
    """Tool that answers crypto market queries via the CryptoCompare REST API.

    Routes natural-language queries to the min-api.cryptocompare.com
    endpoints for spot prices, daily history and trading volume.
    """

    name: str = "cryptocompare_data"
    description: str = """Get cryptocurrency price, volume, and market data from CryptoCompare API.
    Useful for: real-time prices, historical data, market analysis, volume tracking.
    Input: cryptocurrency symbol or query (e.g., BTC, ETH, price analysis)."""
    args_schema: type[BaseModel] = Web3ToolInput

    # API root; PrivateAttr keeps it out of the tool's pydantic field set.
    _base_url: str = PrivateAttr(default="https://min-api.cryptocompare.com/data")

    def __init__(self):
        super().__init__()
        # Store API key as instance variable instead of using Pydantic field
        self._api_key = config.CRYPTOCOMPARE_API_KEY
22
+
23
+ async def _arun(self, query: str, filters: Optional[Dict[str, Any]] = None, **kwargs) -> str:
24
+ """Get crypto data from CryptoCompare API"""
25
+ try:
26
+ filters = filters or {}
27
+ query_lower = query.lower()
28
+
29
+ # Extract cryptocurrency symbols
30
+ common_symbols = {
31
+ "bitcoin": "BTC", "btc": "BTC",
32
+ "ethereum": "ETH", "eth": "ETH",
33
+ "solana": "SOL", "sol": "SOL",
34
+ "cardano": "ADA", "ada": "ADA",
35
+ "polygon": "MATIC", "matic": "MATIC",
36
+ "avalanche": "AVAX", "avax": "AVAX",
37
+ "chainlink": "LINK", "link": "LINK",
38
+ "uniswap": "UNI", "uni": "UNI",
39
+ "polkadot": "DOT", "dot": "DOT",
40
+ "binance": "BNB", "bnb": "BNB"
41
+ }
42
+
43
+ # Find symbol in query
44
+ symbol = None
45
+ for key, value in common_symbols.items():
46
+ if key in query_lower:
47
+ symbol = value
48
+ break
49
+
50
+ if not symbol:
51
+ # Try to extract uppercase words as potential symbols
52
+ words = query.upper().split()
53
+ potential_symbols = [w for w in words if w.isalpha() and len(w) <= 5]
54
+ symbol = potential_symbols[0] if potential_symbols else "BTC"
55
+
56
+ # Determine data type needed
57
+ if any(word in query_lower for word in ["price", "cost", "value", "current"]):
58
+ return await self._get_current_price(symbol)
59
+ elif any(word in query_lower for word in ["history", "historical", "trend", "chart"]):
60
+ return await self._get_historical_data(symbol)
61
+ elif any(word in query_lower for word in ["volume", "trading"]):
62
+ return await self._get_volume_data(symbol)
63
+ else:
64
+ # Default to current price + basic stats
65
+ return await self._get_current_price(symbol)
66
+
67
+ except Exception as e:
68
+ logger.error(f"CryptoCompare error: {e}")
69
+ return f"⚠️ CryptoCompare data temporarily unavailable: {str(e)}"
70
+
71
    async def _get_current_price(self, symbol: str) -> str:
        """Get current price and basic stats.

        Combines the /price endpoint (flat {currency: amount} mapping) with
        /pricemultifull (nested RAW stats) into one markdown summary.
        """
        try:
            # Current price endpoint
            params = {
                "fsym": symbol,
                "tsyms": "USD,EUR,BTC",
                "extraParams": "Web3ResearchAgent"
            }

            # The API works keyless at reduced rate limits, so the key is optional.
            if self._api_key:
                params["api_key"] = self._api_key

            price_data = await self.make_request(f"{self._base_url}/price", params=params)

            if not price_data:
                return f"❌ No price data available for {symbol}"

            # Get additional stats
            stats_params = {
                "fsym": symbol,
                "tsym": "USD",
                "extraParams": "Web3ResearchAgent"
            }

            if self._api_key:
                stats_params["api_key"] = self._api_key

            stats_data = await self.make_request(f"{self._base_url}/pricemultifull", params=stats_params)

            # Format response
            usd_price = price_data.get("USD", 0)
            eur_price = price_data.get("EUR", 0)
            btc_price = price_data.get("BTC", 0)

            result = f"💰 **{symbol} Current Price** (CryptoCompare):\n\n"
            result += f"🇺🇸 **USD**: ${usd_price:,.2f}\n"

            # Secondary quotes are shown only when the API returned them.
            if eur_price > 0:
                result += f"🇪🇺 **EUR**: €{eur_price:,.2f}\n"
            if btc_price > 0:
                result += f"₿ **BTC**: {btc_price:.8f}\n"

            # Add stats if available — /pricemultifull nests them under
            # RAW[<symbol>][<quote currency>].
            if stats_data and "RAW" in stats_data:
                raw_data = stats_data["RAW"].get(symbol, {}).get("USD", {})

                if raw_data:
                    change_24h = raw_data.get("CHANGEPCT24HOUR", 0)
                    volume_24h = raw_data.get("VOLUME24HOUR", 0)
                    market_cap = raw_data.get("MKTCAP", 0)

                    emoji = "📈" if change_24h >= 0 else "📉"
                    result += f"\n📊 **24h Change**: {change_24h:+.2f}% {emoji}\n"

                    if volume_24h > 0:
                        result += f"📈 **24h Volume**: ${volume_24h:,.0f}\n"

                    if market_cap > 0:
                        result += f"🏦 **Market Cap**: ${market_cap:,.0f}\n"

            result += f"\n🕒 *Real-time data from CryptoCompare*"
            return result

        except Exception as e:
            logger.error(f"Price data error: {e}")
            return f"⚠️ Unable to fetch {symbol} price data"
138
+
139
+ async def _get_historical_data(self, symbol: str, days: int = 30) -> str:
140
+ """Get historical price data"""
141
+ try:
142
+ params = {
143
+ "fsym": symbol,
144
+ "tsym": "USD",
145
+ "limit": min(days, 365),
146
+ "extraParams": "Web3ResearchAgent"
147
+ }
148
+
149
+ if self._api_key:
150
+ params["api_key"] = self._api_key
151
+
152
+ hist_data = await self.make_request(f"{self._base_url}/histoday", params=params)
153
+
154
+ if not hist_data or "Data" not in hist_data:
155
+ return f"❌ No historical data available for {symbol}"
156
+
157
+ data_points = hist_data["Data"]
158
+ if not data_points:
159
+ return f"❌ No historical data points for {symbol}"
160
+
161
+ # Get first and last prices
162
+ first_price = data_points[0].get("close", 0)
163
+ last_price = data_points[-1].get("close", 0)
164
+
165
+ # Calculate performance
166
+ if first_price > 0:
167
+ performance = ((last_price - first_price) / first_price) * 100
168
+ performance_emoji = "📈" if performance >= 0 else "📉"
169
+ else:
170
+ performance = 0
171
+ performance_emoji = "➡️"
172
+
173
+ # Find highest and lowest
174
+ high_price = max([p.get("high", 0) for p in data_points])
175
+ low_price = min([p.get("low", 0) for p in data_points if p.get("low", 0) > 0])
176
+
177
+ result = f"📊 **{symbol} Historical Analysis** ({days} days):\n\n"
178
+ result += f"💲 **Starting Price**: ${first_price:,.2f}\n"
179
+ result += f"💲 **Current Price**: ${last_price:,.2f}\n"
180
+ result += f"📊 **Performance**: {performance:+.2f}% {performance_emoji}\n\n"
181
+
182
+ result += f"🔝 **Period High**: ${high_price:,.2f}\n"
183
+ result += f"🔻 **Period Low**: ${low_price:,.2f}\n"
184
+
185
+ # Calculate volatility (simplified)
186
+ price_changes = []
187
+ for i in range(1, len(data_points)):
188
+ prev_close = data_points[i-1].get("close", 0)
189
+ curr_close = data_points[i].get("close", 0)
190
+ if prev_close > 0:
191
+ change = abs((curr_close - prev_close) / prev_close) * 100
192
+ price_changes.append(change)
193
+
194
+ if price_changes:
195
+ avg_volatility = sum(price_changes) / len(price_changes)
196
+ result += f"📈 **Avg Daily Volatility**: {avg_volatility:.2f}%\n"
197
+
198
+ result += f"\n🕒 *Data from CryptoCompare*"
199
+ return result
200
+
201
+ except Exception as e:
202
+ logger.error(f"Historical data error: {e}")
203
+ return f"⚠️ Unable to fetch historical data for {symbol}"
204
+
205
    async def _get_volume_data(self, symbol: str) -> str:
        """Get volume and trading data.

        Reads the 24h volume / OHL figures from /pricemultifull, which nests
        them under RAW[<symbol>]["USD"].
        """
        try:
            params = {
                "fsym": symbol,
                "tsym": "USD",
                "extraParams": "Web3ResearchAgent"
            }

            # API key is optional; the endpoint works keyless at lower limits.
            if self._api_key:
                params["api_key"] = self._api_key

            volume_data = await self.make_request(f"{self._base_url}/pricemultifull", params=params)

            if not volume_data or "RAW" not in volume_data:
                return f"❌ No volume data available for {symbol}"

            raw_data = volume_data["RAW"].get(symbol, {}).get("USD", {})

            if not raw_data:
                return f"❌ No trading data found for {symbol}"

            # VOLUME24HOUR is denominated in the base asset,
            # VOLUME24HOURTO in the quote currency (USD).
            volume_24h = raw_data.get("VOLUME24HOUR", 0)
            volume_24h_to = raw_data.get("VOLUME24HOURTO", 0)
            total_volume = raw_data.get("TOTALVOLUME24H", 0)

            result = f"📈 **{symbol} Trading Volume**:\n\n"
            result += f"📊 **24h Volume**: {volume_24h:,.0f} {symbol}\n"
            result += f"💰 **24h Volume (USD)**: ${volume_24h_to:,.0f}\n"

            if total_volume > 0:
                result += f"🌐 **Total 24h Volume**: ${total_volume:,.0f}\n"

            # Additional trading info
            open_price = raw_data.get("OPEN24HOUR", 0)
            high_price = raw_data.get("HIGH24HOUR", 0)
            low_price = raw_data.get("LOW24HOUR", 0)

            # Open price acts as the sentinel for the whole OHL section.
            if open_price > 0:
                result += f"\n📊 **24h Open**: ${open_price:,.2f}\n"
                result += f"🔝 **24h High**: ${high_price:,.2f}\n"
                result += f"🔻 **24h Low**: ${low_price:,.2f}\n"

            result += f"\n🕒 *Trading data from CryptoCompare*"
            return result

        except Exception as e:
            logger.error(f"Volume data error: {e}")
            return f"⚠️ Unable to fetch volume data for {symbol}"
src/tools/defillama_tool.py CHANGED
@@ -2,14 +2,16 @@ from typing import Dict, Any, Optional
2
  from pydantic import BaseModel, PrivateAttr
3
  from src.tools.base_tool import BaseWeb3Tool, Web3ToolInput
4
  from src.utils.logger import get_logger
 
 
5
 
6
  logger = get_logger(__name__)
7
 
8
  class DeFiLlamaTool(BaseWeb3Tool):
9
  name: str = "defillama_data"
10
- description: str = """Get DeFi protocol data, TVL, and yields from DeFiLlama.
11
- Useful for: DeFi analysis, protocol rankings, TVL trends, yield farming data.
12
- Input: protocol name or general DeFi query."""
13
  args_schema: type[BaseModel] = Web3ToolInput
14
 
15
  _base_url: str = PrivateAttr(default="https://api.llama.fi")
@@ -17,151 +19,297 @@ class DeFiLlamaTool(BaseWeb3Tool):
17
  def __init__(self):
18
  super().__init__()
19
 
20
- async def _arun(self, query: str, filters: Optional[Dict[str, Any]] = None) -> str:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  try:
22
  filters = filters or {}
 
23
 
24
- if filters.get("type") == "tvl_overview":
25
- return await self._get_tvl_overview()
26
- elif filters.get("type") == "protocol_data":
27
  return await self._get_protocol_data(query)
28
- elif query:
29
- return await self._search_protocols(query)
30
- else:
 
 
31
  return await self._get_top_protocols()
 
 
32
 
33
  except Exception as e:
34
  logger.error(f"DeFiLlama error: {e}")
35
  return f"⚠️ DeFiLlama service temporarily unavailable: {str(e)}"
36
 
37
  async def _get_top_protocols(self) -> str:
 
38
  try:
39
  data = await self.make_request(f"{self._base_url}/protocols")
40
 
41
  if not data or not isinstance(data, list):
42
  return "⚠️ DeFi protocol data temporarily unavailable"
43
 
44
- if len(data) == 0:
45
- return " No DeFi protocols found"
46
-
47
- # Filter and validate protocols
48
- valid_protocols = []
49
- for protocol in data:
50
- try:
51
- tvl = protocol.get("tvl", 0)
52
- if tvl is not None and tvl > 0:
53
- valid_protocols.append(protocol)
54
- except (TypeError, ValueError):
55
- continue
56
 
57
- if not valid_protocols:
58
  return "⚠️ No valid protocol data available"
59
 
60
- # Sort by TVL and take top 10
61
- top_protocols = sorted(valid_protocols, key=lambda x: x.get("tvl", 0), reverse=True)[:10]
62
-
63
  result = "🏦 **Top DeFi Protocols by TVL:**\n\n"
64
 
65
  for i, protocol in enumerate(top_protocols, 1):
66
- try:
67
- name = protocol.get("name", "Unknown")
68
- tvl = protocol.get("tvl", 0)
69
- change = protocol.get("change_1d", 0)
70
- chain = protocol.get("chain", "Multi-chain")
71
-
72
- # Handle edge cases
73
- if tvl <= 0:
74
- continue
75
-
76
- emoji = "📈" if change >= 0 else "📉"
77
- tvl_formatted = f"${tvl/1e9:.2f}B" if tvl >= 1e9 else f"${tvl/1e6:.1f}M"
78
- change_formatted = f"({change:+.2f}%)" if change is not None else "(N/A)"
79
-
80
- result += f"{i}. **{name}** ({chain}): {tvl_formatted} TVL {emoji} {change_formatted}\n"
81
-
82
- except (TypeError, KeyError, ValueError) as e:
83
- logger.warning(f"Skipping invalid protocol data: {e}")
84
- continue
85
-
86
- return result if len(result.split('\n')) > 3 else "⚠️ Unable to format protocol data properly"
87
 
88
  except Exception as e:
89
  logger.error(f"Top protocols error: {e}")
90
  return "⚠️ DeFi protocol data temporarily unavailable"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
  async def _get_tvl_overview(self) -> str:
 
93
  try:
94
- protocols_data = await self.make_request(f"{self.base_url}/protocols")
95
- chains_data = await self.make_request(f"{self.base_url}/chains")
 
96
 
97
- if not protocols_data or not chains_data:
98
- return "TVL overview data unavailable"
99
 
100
- total_tvl = sum(p.get("tvl", 0) for p in protocols_data)
101
- top_chains = sorted(chains_data, key=lambda x: x.get("tvl", 0), reverse=True)[:5]
102
 
103
  result = "🌐 **DeFi TVL Overview:**\n\n"
104
- result += f"💰 **Total TVL**: ${total_tvl/1e9:.2f}B\n\n"
105
- result += "**Top Chains by TVL:**\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
 
107
- for i, chain in enumerate(top_chains, 1):
108
- name = chain.get("name", "Unknown")
109
- tvl = chain.get("tvl", 0)
110
- result += f"{i}. **{name}**: ${tvl/1e9:.2f}B\n"
 
111
 
112
  return result
113
 
114
- except Exception:
 
115
  return await self._get_top_protocols()
116
-
117
- async def _get_protocol_data(self, protocol: str) -> str:
118
- protocols = await self.make_request(f"{self.base_url}/protocols")
119
-
120
- if not protocols:
121
- return f"No data available for {protocol}"
122
-
123
- matching_protocol = None
124
- for p in protocols:
125
- if protocol.lower() in p.get("name", "").lower():
126
- matching_protocol = p
127
- break
128
-
129
- if not matching_protocol:
130
- return f"Protocol '{protocol}' not found"
131
-
132
- name = matching_protocol.get("name", "Unknown")
133
- tvl = matching_protocol.get("tvl", 0)
134
- change_1d = matching_protocol.get("change_1d", 0)
135
- change_7d = matching_protocol.get("change_7d", 0)
136
- chain = matching_protocol.get("chain", "Multi-chain")
137
- category = matching_protocol.get("category", "Unknown")
138
-
139
- result = f"🏛️ **{name} Protocol Analysis:**\n\n"
140
- result += f"💰 **TVL**: ${tvl/1e9:.2f}B\n"
141
- result += f"📊 **24h Change**: {change_1d:+.2f}%\n"
142
- result += f"📈 **7d Change**: {change_7d:+.2f}%\n"
143
- result += f"⛓️ **Chain**: {chain}\n"
144
- result += f"🏷️ **Category**: {category}\n"
145
-
146
- return result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
 
148
  async def _search_protocols(self, query: str) -> str:
149
- protocols = await self.make_request(f"{self.base_url}/protocols")
150
-
151
- if not protocols:
152
- return "No protocol data available"
153
-
154
- matching = [p for p in protocols if query.lower() in p.get("name", "").lower()][:5]
155
-
156
- if not matching:
157
- return f"No protocols found matching '{query}'"
158
-
159
- result = f"🔍 **Protocols matching '{query}':**\n\n"
160
-
161
- for protocol in matching:
162
- name = protocol.get("name", "Unknown")
163
- tvl = protocol.get("tvl", 0)
164
- chain = protocol.get("chain", "Multi-chain")
165
- result += f"• **{name}** ({chain}): ${tvl/1e9:.2f}B TVL\n"
166
-
167
- return result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  from pydantic import BaseModel, PrivateAttr
3
  from src.tools.base_tool import BaseWeb3Tool, Web3ToolInput
4
  from src.utils.logger import get_logger
5
+ import aiohttp
6
+ import json
7
 
8
  logger = get_logger(__name__)
9
 
10
  class DeFiLlamaTool(BaseWeb3Tool):
11
  name: str = "defillama_data"
12
+ description: str = """Get real DeFi protocol data, TVL, and yields from DeFiLlama API.
13
+ Useful for: DeFi analysis, protocol rankings, TVL trends, chain analysis.
14
+ Input: protocol name, chain name, or general DeFi query."""
15
  args_schema: type[BaseModel] = Web3ToolInput
16
 
17
  _base_url: str = PrivateAttr(default="https://api.llama.fi")
 
19
  def __init__(self):
20
  super().__init__()
21
 
22
+ async def make_request(self, url: str, timeout: int = 10) -> Optional[Dict[str, Any]]:
23
+ """Make HTTP request to DeFiLlama API"""
24
+ try:
25
+ async with aiohttp.ClientSession() as session:
26
+ async with session.get(url, timeout=aiohttp.ClientTimeout(total=timeout)) as response:
27
+ if response.status == 200:
28
+ data = await response.json()
29
+ logger.info(f"✅ DeFiLlama API call successful: {url}")
30
+ return data
31
+ else:
32
+ logger.error(f"❌ DeFiLlama API error: {response.status} for {url}")
33
+ return None
34
+ except Exception as e:
35
+ logger.error(f"❌ DeFiLlama API request failed: {e}")
36
+ return None
37
+
38
+ async def _arun(self, query: str, filters: Optional[Dict[str, Any]] = None, **kwargs) -> str:
39
  try:
40
  filters = filters or {}
41
+ query_lower = query.lower()
42
 
43
+ # Route based on query type
44
+ if "protocol" in query_lower and any(name in query_lower for name in ["uniswap", "aave", "compound", "curve"]):
 
45
  return await self._get_protocol_data(query)
46
+ elif any(word in query_lower for word in ["chain", "ethereum", "polygon", "avalanche", "bsc"]):
47
+ return await self._get_chain_tvl(query)
48
+ elif "tvl" in query_lower or "total value locked" in query_lower:
49
+ return await self._get_tvl_overview()
50
+ elif "top" in query_lower or "ranking" in query_lower:
51
  return await self._get_top_protocols()
52
+ else:
53
+ return await self._search_protocols(query)
54
 
55
  except Exception as e:
56
  logger.error(f"DeFiLlama error: {e}")
57
  return f"⚠️ DeFiLlama service temporarily unavailable: {str(e)}"
58
 
59
  async def _get_top_protocols(self) -> str:
60
+ """Get top protocols using /protocols endpoint"""
61
  try:
62
  data = await self.make_request(f"{self._base_url}/protocols")
63
 
64
  if not data or not isinstance(data, list):
65
  return "⚠️ DeFi protocol data temporarily unavailable"
66
 
67
+ # Sort by TVL and take top 10
68
+ top_protocols = sorted([p for p in data if p.get("tvl") is not None and p.get("tvl", 0) > 0],
69
+ key=lambda x: x.get("tvl", 0), reverse=True)[:10]
 
 
 
 
 
 
 
 
 
70
 
71
+ if not top_protocols:
72
  return "⚠️ No valid protocol data available"
73
 
 
 
 
74
  result = "🏦 **Top DeFi Protocols by TVL:**\n\n"
75
 
76
  for i, protocol in enumerate(top_protocols, 1):
77
+ name = protocol.get("name", "Unknown")
78
+ tvl = protocol.get("tvl", 0)
79
+ change_1d = protocol.get("change_1d", 0)
80
+ chain = protocol.get("chain", "Multi-chain")
81
+
82
+ emoji = "📈" if change_1d >= 0 else "📉"
83
+ tvl_formatted = f"${tvl/1e9:.2f}B" if tvl >= 1e9 else f"${tvl/1e6:.1f}M"
84
+ change_formatted = f"({change_1d:+.2f}%)" if change_1d is not None else "(N/A)"
85
+
86
+ result += f"{i}. **{name}** ({chain}): {tvl_formatted} TVL {emoji} {change_formatted}\n"
87
+
88
+ return result
 
 
 
 
 
 
 
 
 
89
 
90
  except Exception as e:
91
  logger.error(f"Top protocols error: {e}")
92
  return "⚠️ DeFi protocol data temporarily unavailable"
93
+
94
    async def _get_protocol_data(self, protocol_name: str) -> str:
        """Get specific protocol data using /protocol/{protocol} endpoint.

        Resolves the protocol slug from the /protocols listing first, then
        fetches the detailed record, falling back to the listing entry when
        the detail call fails.
        """
        try:
            # First get all protocols to find the slug
            protocols = await self.make_request(f"{self._base_url}/protocols")
            if not protocols:
                return f"❌ Cannot fetch protocols list"

            # Find matching protocol (case-insensitive substring match,
            # first hit wins)
            matching_protocol = None
            for p in protocols:
                if protocol_name.lower() in p.get("name", "").lower():
                    matching_protocol = p
                    break

            if not matching_protocol:
                return f"❌ Protocol '{protocol_name}' not found"

            # Get detailed protocol data
            protocol_slug = matching_protocol.get("slug", protocol_name.lower())
            detailed_data = await self.make_request(f"{self._base_url}/protocol/{protocol_slug}")

            if detailed_data:
                # Use detailed data if available
                # NOTE(review): the public /protocol/{slug} endpoint reportedly
                # returns "tvl" as a historical series, not a single number —
                # confirm the shape; a list here would make the
                # ${tvl/1e9:.2f}B formatting below raise and fall into the
                # except handler.
                name = detailed_data.get("name", matching_protocol.get("name"))
                tvl = detailed_data.get("tvl", matching_protocol.get("tvl", 0))
                change_1d = detailed_data.get("change_1d", matching_protocol.get("change_1d", 0))
                change_7d = detailed_data.get("change_7d", matching_protocol.get("change_7d", 0))
                chains = detailed_data.get("chains", [matching_protocol.get("chain", "Unknown")])
                category = detailed_data.get("category", matching_protocol.get("category", "Unknown"))
                description = detailed_data.get("description", "No description available")
            else:
                # Fallback to basic protocol data
                name = matching_protocol.get("name", "Unknown")
                tvl = matching_protocol.get("tvl", 0)
                change_1d = matching_protocol.get("change_1d", 0)
                change_7d = matching_protocol.get("change_7d", 0)
                chains = [matching_protocol.get("chain", "Unknown")]
                category = matching_protocol.get("category", "Unknown")
                description = "No description available"

            result = f"🏛️ **{name} Protocol Analysis:**\n\n"
            # Long descriptions are truncated to keep the summary compact.
            result += f"📝 **Description**: {description[:200]}{'...' if len(description) > 200 else ''}\n\n"
            result += f"💰 **Current TVL**: ${tvl/1e9:.2f}B\n"
            result += f"📊 **24h Change**: {change_1d:+.2f}%\n"
            result += f"📈 **7d Change**: {change_7d:+.2f}%\n"
            result += f"⛓️ **Chains**: {', '.join(chains) if isinstance(chains, list) else str(chains)}\n"
            result += f"🏷️ **Category**: {category}\n"

            return result

        except Exception as e:
            logger.error(f"Protocol data error: {e}")
            return f"⚠️ Error fetching data for {protocol_name}: {str(e)}"
148
 
149
  async def _get_tvl_overview(self) -> str:
150
+ """Get TVL overview using /protocols and /v2/chains endpoints"""
151
  try:
152
+ # Get protocols and chains data
153
+ protocols_data = await self.make_request(f"{self._base_url}/protocols")
154
+ chains_data = await self.make_request(f"{self._base_url}/v2/chains")
155
 
156
+ if not protocols_data:
157
+ return "⚠️ TVL overview data unavailable"
158
 
159
+ # Calculate total TVL
160
+ total_tvl = sum(p.get("tvl", 0) for p in protocols_data if p.get("tvl") is not None and p.get("tvl", 0) > 0)
161
 
162
  result = "🌐 **DeFi TVL Overview:**\n\n"
163
+ result += f"💰 **Total DeFi TVL**: ${total_tvl/1e9:.2f}B\n\n"
164
+
165
+ # Add chain data if available
166
+ if chains_data and isinstance(chains_data, list):
167
+ top_chains = sorted([c for c in chains_data if c.get("tvl") is not None and c.get("tvl", 0) > 0],
168
+ key=lambda x: x.get("tvl", 0), reverse=True)[:5]
169
+
170
+ result += "**Top Chains by TVL:**\n"
171
+ for i, chain in enumerate(top_chains, 1):
172
+ name = chain.get("name", "Unknown")
173
+ tvl = chain.get("tvl", 0)
174
+ result += f"{i}. **{name}**: ${tvl/1e9:.2f}B\n"
175
+
176
+ # Add top protocol categories
177
+ categories = {}
178
+ for protocol in protocols_data:
179
+ if protocol.get("tvl") is not None and protocol.get("tvl", 0) > 0:
180
+ category = protocol.get("category", "Other")
181
+ categories[category] = categories.get(category, 0) + protocol.get("tvl", 0)
182
 
183
+ if categories:
184
+ result += "\n**Top Categories by TVL:**\n"
185
+ sorted_categories = sorted(categories.items(), key=lambda x: x[1], reverse=True)[:5]
186
+ for i, (category, tvl) in enumerate(sorted_categories, 1):
187
+ result += f"{i}. **{category}**: ${tvl/1e9:.2f}B\n"
188
 
189
  return result
190
 
191
+ except Exception as e:
192
+ logger.error(f"TVL overview error: {e}")
193
  return await self._get_top_protocols()
194
+
195
+ async def _get_chain_tvl(self, chain_query: str) -> str:
196
+ """Get chain TVL data using /v2/historicalChainTvl/{chain} endpoint"""
197
+ try:
198
+ # Map common chain names
199
+ chain_mapping = {
200
+ "ethereum": "Ethereum",
201
+ "eth": "Ethereum",
202
+ "polygon": "Polygon",
203
+ "matic": "Polygon",
204
+ "bsc": "BSC",
205
+ "binance": "BSC",
206
+ "avalanche": "Avalanche",
207
+ "avax": "Avalanche",
208
+ "arbitrum": "Arbitrum",
209
+ "optimism": "Optimism",
210
+ "fantom": "Fantom",
211
+ "solana": "Solana",
212
+ "sol": "Solana"
213
+ }
214
+
215
+ # Extract chain name from query
216
+ chain_name = None
217
+ for key, value in chain_mapping.items():
218
+ if key in chain_query.lower():
219
+ chain_name = value
220
+ break
221
+
222
+ if not chain_name:
223
+ # Try to get all chains first
224
+ chains_data = await self.make_request(f"{self._base_url}/v2/chains")
225
+ if chains_data:
226
+ result = "⛓️ **Available Chains:**\n\n"
227
+ sorted_chains = sorted([c for c in chains_data if c.get("tvl", 0) > 0],
228
+ key=lambda x: x.get("tvl", 0), reverse=True)[:10]
229
+ for i, chain in enumerate(sorted_chains, 1):
230
+ name = chain.get("name", "Unknown")
231
+ tvl = chain.get("tvl", 0)
232
+ result += f"{i}. **{name}**: ${tvl/1e9:.2f}B TVL\n"
233
+ return result
234
+ else:
235
+ return f"❌ Chain '{chain_query}' not recognized. Try: ethereum, polygon, bsc, avalanche, etc."
236
+
237
+ # Get historical TVL for the chain
238
+ historical_data = await self.make_request(f"{self._base_url}/v2/historicalChainTvl/{chain_name}")
239
+
240
+ if not historical_data:
241
+ return f"❌ No data available for {chain_name}"
242
+
243
+ # Get current TVL (last entry)
244
+ current_tvl = historical_data[-1]["tvl"] if historical_data else 0
245
+
246
+ result = f"⛓️ **{chain_name} Chain Analysis:**\n\n"
247
+ result += f"💰 **Current TVL**: ${current_tvl/1e9:.2f}B\n"
248
+
249
+ # Calculate changes if we have enough data
250
+ if len(historical_data) >= 2:
251
+ prev_tvl = historical_data[-2]["tvl"]
252
+ daily_change = ((current_tvl - prev_tvl) / prev_tvl) * 100 if prev_tvl > 0 else 0
253
+ emoji = "📈" if daily_change >= 0 else "📉"
254
+ result += f"� **24h Change**: {daily_change:+.2f}% {emoji}\n"
255
+
256
+ if len(historical_data) >= 7:
257
+ week_ago_tvl = historical_data[-7]["tvl"]
258
+ weekly_change = ((current_tvl - week_ago_tvl) / week_ago_tvl) * 100 if week_ago_tvl > 0 else 0
259
+ emoji = "📈" if weekly_change >= 0 else "📉"
260
+ result += f"📈 **7d Change**: {weekly_change:+.2f}% {emoji}\n"
261
+
262
+ return result
263
+
264
+ except Exception as e:
265
+ logger.error(f"Chain TVL error: {e}")
266
+ return f"⚠️ Error fetching chain data: {str(e)}"
267
 
268
  async def _search_protocols(self, query: str) -> str:
269
+ """Search protocols by name"""
270
+ try:
271
+ protocols = await self.make_request(f"{self._base_url}/protocols")
272
+
273
+ if not protocols:
274
+ return "⚠️ No protocol data available"
275
+
276
+ # Search for matching protocols
277
+ query_lower = query.lower()
278
+ matching = []
279
+
280
+ for p in protocols:
281
+ name = p.get("name", "").lower()
282
+ category = p.get("category", "").lower()
283
+
284
+ if (query_lower in name or
285
+ query_lower in category or
286
+ any(word in name for word in query_lower.split())):
287
+ matching.append(p)
288
+
289
+ # Sort by TVL and limit results
290
+ matching = sorted([p for p in matching if p.get("tvl") is not None and p.get("tvl", 0) > 0],
291
+ key=lambda x: x.get("tvl", 0), reverse=True)[:8]
292
+
293
+ if not matching:
294
+ return f"❌ No protocols found matching '{query}'"
295
+
296
+ result = f"🔍 **Protocols matching '{query}':**\n\n"
297
+
298
+ for i, protocol in enumerate(matching, 1):
299
+ name = protocol.get("name", "Unknown")
300
+ tvl = protocol.get("tvl", 0)
301
+ chain = protocol.get("chain", "Multi-chain")
302
+ category = protocol.get("category", "Unknown")
303
+ change_1d = protocol.get("change_1d", 0)
304
+
305
+ emoji = "📈" if change_1d >= 0 else "📉"
306
+ tvl_formatted = f"${tvl/1e9:.2f}B" if tvl >= 1e9 else f"${tvl/1e6:.1f}M"
307
+
308
+ result += f"{i}. **{name}** ({category})\n"
309
+ result += f" 💰 {tvl_formatted} TVL on {chain} {emoji} {change_1d:+.1f}%\n\n"
310
+
311
+ return result
312
+
313
+ except Exception as e:
314
+ logger.error(f"Search protocols error: {e}")
315
+ return f"⚠️ Search temporarily unavailable: {str(e)}"
src/tools/etherscan_tool.py CHANGED
@@ -25,7 +25,7 @@ class EtherscanTool(BaseWeb3Tool):
25
  if not self.enabled:
26
  logger.warning("Etherscan API key not configured - limited functionality")
27
 
28
- async def _arun(self, query: str, filters: Optional[Dict[str, Any]] = None) -> str:
29
  if not self.enabled:
30
  return "⚠️ **Etherscan Service Limited**\n\nEtherscan functionality requires an API key.\nGet yours free at: https://etherscan.io/apis\n\nSet environment variable: `ETHERSCAN_API_KEY=your_key`"
31
 
 
25
  if not self.enabled:
26
  logger.warning("Etherscan API key not configured - limited functionality")
27
 
28
+ async def _arun(self, query: str, filters: Optional[Dict[str, Any]] = None, **kwargs) -> str:
29
  if not self.enabled:
30
  return "⚠️ **Etherscan Service Limited**\n\nEtherscan functionality requires an API key.\nGet yours free at: https://etherscan.io/apis\n\nSet environment variable: `ETHERSCAN_API_KEY=your_key`"
31
 
src/utils/ai_safety.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ AI Safety Module for Ollama Integration
3
+ Implements content filtering, prompt sanitization, and safety guardrails
4
+ """
5
+
6
+ import re
7
+ import logging
8
+ from typing import Dict, List, Tuple, Optional, Any
9
+ from datetime import datetime, timedelta
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+ class AISafetyGuard:
14
+ """AI Safety guardrails for Ollama interactions"""
15
+
16
    def __init__(self):
        # Raw regex pattern strings matched against incoming queries.
        self.blocked_patterns = self._load_blocked_patterns()
        # Rolling log of {'user_id', 'timestamp'} dicts used for rate limiting.
        self.request_history = []
        # Per-user request budget inside a one-minute sliding window.
        self.max_requests_per_minute = 10
        # Hard cap on accepted query length, in characters.
        self.max_query_length = 2000
21
+
22
+ def _load_blocked_patterns(self) -> List[str]:
23
+ """Load patterns that should be blocked for safety"""
24
+ return [
25
+ # Malicious patterns
26
+ r'(?i)hack|exploit|vulnerability|backdoor|malware',
27
+ r'(?i)bypass.*security|override.*safety|disable.*filter',
28
+ r'(?i)jailbreak|prompt.*injection|ignore.*instructions',
29
+
30
+ # Financial manipulation
31
+ r'(?i)pump.*dump|market.*manipulation|insider.*trading',
32
+ r'(?i)fake.*price|manipulate.*market|artificial.*inflation',
33
+
34
+ # Personal data requests
35
+ r'(?i)private.*key|wallet.*seed|password|personal.*data',
36
+ r'(?i)social.*security|credit.*card|bank.*account',
37
+
38
+ # Harmful content
39
+ r'(?i)illegal.*activity|money.*laundering|tax.*evasion',
40
+ r'(?i)terrorist.*financing|sanctions.*evasion',
41
+
42
+ # System manipulation
43
+ r'(?i)system.*prompt|role.*play.*as|pretend.*to.*be',
44
+ r'(?i)act.*as.*if|simulate.*being|become.*character',
45
+ ]
46
+
47
+ def sanitize_query(self, query: str) -> Tuple[str, bool, str]:
48
+ """
49
+ Sanitize user query for safety
50
+ Returns: (sanitized_query, is_safe, reason)
51
+ """
52
+ if not query or not query.strip():
53
+ return "", False, "Empty query"
54
+
55
+ # Check query length
56
+ if len(query) > self.max_query_length:
57
+ return "", False, f"Query too long ({len(query)} chars, max {self.max_query_length})"
58
+
59
+ # Check for blocked patterns
60
+ for pattern in self.blocked_patterns:
61
+ if re.search(pattern, query):
62
+ logger.warning(f"Blocked unsafe query pattern: {pattern}")
63
+ return "", False, "Query contains potentially unsafe content"
64
+
65
+ # Basic sanitization
66
+ sanitized = query.strip()
67
+ sanitized = re.sub(r'[<>]', '', sanitized) # Remove HTML brackets
68
+ sanitized = re.sub(r'\s+', ' ', sanitized) # Normalize whitespace
69
+
70
+ return sanitized, True, "Query is safe"
71
+
72
+ def check_rate_limit(self, user_id: str = "default") -> Tuple[bool, str]:
73
+ """Check if request rate limit is exceeded"""
74
+ current_time = datetime.now()
75
+
76
+ # Clean old requests (older than 1 minute)
77
+ self.request_history = [
78
+ req for req in self.request_history
79
+ if current_time - req['timestamp'] < timedelta(minutes=1)
80
+ ]
81
+
82
+ # Count requests from this user in the last minute
83
+ user_requests = [
84
+ req for req in self.request_history
85
+ if req['user_id'] == user_id
86
+ ]
87
+
88
+ if len(user_requests) >= self.max_requests_per_minute:
89
+ return False, f"Rate limit exceeded: {len(user_requests)}/{self.max_requests_per_minute} requests per minute"
90
+
91
+ # Add current request
92
+ self.request_history.append({
93
+ 'user_id': user_id,
94
+ 'timestamp': current_time
95
+ })
96
+
97
+ return True, "Rate limit OK"
98
+
99
+ def validate_ollama_response(self, response: str) -> Tuple[str, bool, str]:
100
+ """
101
+ Validate Ollama response for safety and quality
102
+ Returns: (cleaned_response, is_valid, reason)
103
+ """
104
+ if not response or not response.strip():
105
+ return "", False, "Empty response from Ollama"
106
+
107
+ # Check for dangerous content in response
108
+ dangerous_patterns = [
109
+ r'(?i)here.*is.*how.*to.*hack',
110
+ r'(?i)steps.*to.*exploit',
111
+ r'(?i)bypass.*security.*by',
112
+ r'(?i)manipulate.*market.*by',
113
+ ]
114
+
115
+ for pattern in dangerous_patterns:
116
+ if re.search(pattern, response):
117
+ logger.warning(f"Blocked unsafe Ollama response: {pattern}")
118
+ return "", False, "Response contains potentially unsafe content"
119
+
120
+ # Basic response cleaning
121
+ cleaned = response.strip()
122
+
123
+ # Remove any potential HTML/JavaScript
124
+ cleaned = re.sub(r'<script.*?</script>', '', cleaned, flags=re.DOTALL | re.IGNORECASE)
125
+ cleaned = re.sub(r'<[^>]+>', '', cleaned)
126
+
127
+ # Ensure response is within reasonable length
128
+ if len(cleaned) > 10000: # 10k character limit
129
+ cleaned = cleaned[:10000] + "\n\n[Response truncated for safety]"
130
+
131
+ return cleaned, True, "Response is safe"
132
+
133
+ def validate_gemini_response(self, response: str) -> Tuple[str, bool, str]:
134
+ """
135
+ Validate Gemini response for safety and quality
136
+ Returns: (cleaned_response, is_valid, reason)
137
+ """
138
+ if not response or not response.strip():
139
+ return "", False, "Empty response from Gemini"
140
+
141
+ # Check for dangerous content in response
142
+ dangerous_patterns = [
143
+ r'(?i)here.*is.*how.*to.*hack',
144
+ r'(?i)steps.*to.*exploit',
145
+ r'(?i)bypass.*security.*by',
146
+ r'(?i)manipulate.*market.*by',
147
+ ]
148
+
149
+ for pattern in dangerous_patterns:
150
+ if re.search(pattern, response):
151
+ logger.warning(f"Blocked unsafe Gemini response: {pattern}")
152
+ return "", False, "Response contains potentially unsafe content"
153
+
154
+ # Basic response cleaning
155
+ cleaned = response.strip()
156
+
157
+ # Remove any potential HTML/JavaScript
158
+ cleaned = re.sub(r'<script.*?</script>', '', cleaned, flags=re.DOTALL | re.IGNORECASE)
159
+ cleaned = re.sub(r'<[^>]+>', '', cleaned)
160
+
161
+ # Ensure response is within reasonable length
162
+ if len(cleaned) > 10000: # 10k character limit
163
+ cleaned = cleaned[:10000] + "\n\n[Response truncated for safety]"
164
+
165
+ return cleaned, True, "Response is safe"
166
+
167
+ def create_safe_prompt(self, user_query: str, tool_context: str) -> str:
168
+ """Create a safety-enhanced prompt for Ollama - Optimized for speed"""
169
+
170
+ # Truncate context if too long to improve processing speed
171
+ if len(tool_context) > 2000:
172
+ tool_context = tool_context[:2000] + "\n[Context truncated for processing speed]"
173
+
174
+ prompt = f"""Answer this cryptocurrency question using the data provided:
175
+
176
+ QUESTION: {user_query}
177
+
178
+ DATA:
179
+ {tool_context}
180
+
181
+ Provide a helpful, factual response focused on cryptocurrency analysis. Be concise and professional.
182
+
183
+ ANSWER:"""
184
+
185
+ return prompt
186
+
187
+ def log_safety_event(self, event_type: str, details: Dict[str, Any]):
188
+ """Log safety-related events for monitoring"""
189
+ logger.info(f"AI Safety Event: {event_type} - {details}")
190
+
191
+ # Global safety instance
192
+ ai_safety = AISafetyGuard()
src/utils/config.py CHANGED
@@ -8,10 +8,19 @@ load_dotenv()
8
 
9
  @dataclass
10
  class Config:
11
- GEMINI_API_KEY: str = os.getenv("GEMINI_API_KEY", "")
12
- COINGECKO_API_KEY: Optional[str] = os.getenv("COINGECKO_API_KEY")
13
- CRYPTOCOMPARE_API_KEY: Optional[str] = os.getenv("CRYPTOCOMPARE_API_KEY")
14
- ETHERSCAN_API_KEY: str = os.getenv("ETHERSCAN_API_KEY", "")
 
 
 
 
 
 
 
 
 
15
 
16
  COINGECKO_BASE_URL: str = "https://api.coingecko.com/api/v3"
17
  CRYPTOCOMPARE_BASE_URL: str = "https://min-api.cryptocompare.com/data"
 
8
 
9
  @dataclass
10
  class Config:
11
+ # LLM Configuration - Both Ollama and Gemini available
12
+ GEMINI_API_KEY: str = os.getenv("GEMINI_API_KEY", "") # Enable Gemini when API key provided
13
+ USE_OLLAMA_ONLY: bool = not bool(os.getenv("GEMINI_API_KEY")) # Auto-detect based on API key
14
+
15
+ # Available API Keys
16
+ COINGECKO_API_KEY: Optional[str] = None # Not available - costs money
17
+ CRYPTOCOMPARE_API_KEY: Optional[str] = os.getenv("CRYPTOCOMPARE_API_KEY") # Available
18
+ ETHERSCAN_API_KEY: str = os.getenv("ETHERSCAN_API_KEY", "") # Available
19
+
20
+ # Ollama Configuration
21
+ OLLAMA_BASE_URL: str = "http://localhost:11434"
22
+ OLLAMA_MODEL: str = "llama3.1:8b" # Upgraded to Llama 3.1 8B for HF Spaces with 16GB RAM
23
+ USE_OLLAMA_FALLBACK: bool = True
24
 
25
  COINGECKO_BASE_URL: str = "https://api.coingecko.com/api/v3"
26
  CRYPTOCOMPARE_BASE_URL: str = "https://min-api.cryptocompare.com/data"
static/app.js ADDED
@@ -0,0 +1,387 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Shared UI/conversation state.
let chatHistory = [];   // rolling window of {role, content} messages (capped at 20 in addMessage)
let messageCount = 0;   // used by addMessage to clear the welcome screen on first post
let useGemini = false;  // Track current LLM choice: true = Gemini (cloud), false = Ollama (local)

// Initialize Gemini toggle
document.addEventListener('DOMContentLoaded', function() {
    const geminiToggle = document.getElementById('geminiToggle');
    // NOTE(review): toggleLabel is unused here; updateToggleLabel() re-queries it.
    const toggleLabel = document.querySelector('.toggle-label');

    // Load saved preference (localStorage stores strings, hence the comparison).
    useGemini = localStorage.getItem('useGemini') === 'true';
    geminiToggle.checked = useGemini;
    updateToggleLabel();

    // Handle toggle changes
    geminiToggle.addEventListener('change', function() {
        useGemini = this.checked;
        localStorage.setItem('useGemini', useGemini.toString());
        updateToggleLabel();
        console.log(`Switched to ${useGemini ? 'Gemini' : 'Ollama'} mode`);

        // Show confirmation
        showStatus(`Switched to ${useGemini ? 'Gemini (Cloud AI)' : 'Ollama (Local AI)'} mode`, 'info');

        // Refresh status to reflect changes
        checkStatus();
    });
});
29
+
30
function updateToggleLabel() {
    // Reflect the currently selected model in the header toggle caption.
    const label = document.querySelector('.toggle-label');
    if (!label) return;
    label.textContent = `AI Model: ${useGemini ? 'Gemini' : 'Ollama'}`;
}
36
+
37
async function checkStatus() {
    // Query the backend /status endpoint and render the availability banner.
    try {
        const response = await fetch('/status');
        const status = await response.json();

        const statusDiv = document.getElementById('status');

        if (status.enabled && status.gemini_configured) {
            // Fully configured: show the online banner plus the tool list.
            statusDiv.className = 'status online';
            statusDiv.innerHTML = '<span>Research systems online</span>' +
                '<div style="margin-top: 0.5rem; font-size: 0.85rem; opacity: 0.8;">' +
                'Tools: ' + status.tools_available.join(' • ') + '</div>';
        } else {
            // Degraded mode: Gemini key missing, only some tools available.
            statusDiv.className = 'status offline';
            statusDiv.innerHTML = '<span>Limited mode - Configure GEMINI_API_KEY for full functionality</span>' +
                '<div style="margin-top: 0.5rem; font-size: 0.85rem; opacity: 0.8;">' +
                'Available: ' + status.tools_available.join(' • ') + '</div>';
        }
    } catch (error) {
        // Network or JSON failure: fall back to a generic offline banner.
        const statusDiv = document.getElementById('status');
        statusDiv.className = 'status offline';
        statusDiv.innerHTML = '<span>Connection error</span>';
    }
}
61
+
62
async function sendQuery() {
    // Submit the current query to the /query/stream SSE endpoint and render
    // progress, tool, result, and error events as they arrive.
    const input = document.getElementById('queryInput');
    const sendBtn = document.getElementById('sendBtn');
    const loadingIndicator = document.getElementById('loadingIndicator');
    const query = input.value.trim();

    if (!query) {
        showStatus('Please enter a research query', 'warning');
        return;
    }

    console.log('Sending research query');
    addMessage('user', query);
    input.value = '';

    // Update UI states
    sendBtn.disabled = true;
    sendBtn.innerHTML = '<span class="loading">Processing</span>';
    loadingIndicator.classList.add('active');
    showStatus('Initializing research...', 'processing');

    try {
        console.log('Starting streaming API request...');
        const requestStart = Date.now();

        // Manual timeout control: abort after 5 minutes instead of relying on
        // the browser's default request timeout.
        const controller = new AbortController();
        const timeoutId = setTimeout(() => {
            console.log('Manual timeout after 5 minutes');
            controller.abort();
        }, 300000);

        // Use fetch with streaming for POST requests with body.
        const response = await fetch('/query/stream', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'Accept': 'text/event-stream',
                'Cache-Control': 'no-cache'
            },
            body: JSON.stringify({
                query,
                chat_history: chatHistory,
                use_gemini: useGemini
            }),
            signal: controller.signal,
            keepalive: true
        });

        // Clear the timeout since we got a response.
        clearTimeout(timeoutId);

        if (!response.ok) {
            throw new Error('Request failed with status ' + response.status);
        }

        // Consume the SSE stream line by line.
        const reader = response.body.getReader();
        const decoder = new TextDecoder();
        let buffer = '';
        let streamFinished = false; // set when the server sends a 'complete' event

        while (!streamFinished) {
            const { done, value } = await reader.read();
            if (done) break;

            buffer += decoder.decode(value, { stream: true });
            const lines = buffer.split('\n');
            buffer = lines.pop(); // Keep incomplete line in buffer

            for (const line of lines) {
                if (!line.startsWith('data: ')) continue;

                // FIX: guard only JSON.parse. Previously the whole handler sat
                // inside this try, so thrown server 'error' events were swallowed
                // by the parse-error catch and never reached the outer handler.
                let data;
                try {
                    data = JSON.parse(line.substring(6));
                } catch (parseError) {
                    console.error('Parse error:', parseError);
                    continue;
                }

                if (data.type === 'status') {
                    showStatus(data.message, 'processing');
                    updateProgress(data.progress);
                    // Also update the loading text
                    const loadingText = document.getElementById('loadingText');
                    if (loadingText) {
                        loadingText.textContent = data.message;
                    }
                    console.log('Progress: ' + data.progress + '% - ' + data.message);
                } else if (data.type === 'tools') {
                    showStatus(data.message, 'processing');
                    // Update loading text for tools
                    const loadingText = document.getElementById('loadingText');
                    if (loadingText) {
                        loadingText.textContent = data.message;
                    }
                    console.log('Tools: ' + data.message);
                } else if (data.type === 'result') {
                    const result = data.data;
                    const requestTime = Date.now() - requestStart;
                    console.log('Request completed in ' + requestTime + 'ms');

                    if (result.success) {
                        addMessage('assistant', result.response, result.sources, result.visualizations);
                        showStatus('Research complete', 'success');
                        console.log('Analysis completed successfully');
                    } else {
                        console.log('Analysis request failed');
                        addMessage('assistant', result.response || 'Analysis temporarily unavailable. Please try again.', [], []);
                        showStatus('Request failed', 'error');
                    }
                } else if (data.type === 'complete') {
                    // FIX: the old bare `break` only exited this inner for-loop,
                    // leaving the outer while(true) read loop blocked on the
                    // stream. Flag completion so both loops terminate.
                    streamFinished = true;
                    break;
                } else if (data.type === 'error') {
                    throw new Error(data.message);
                }
            }
        }

    } catch (error) {
        console.error('Streaming request error:', error);

        // More specific error handling
        if (error.name === 'AbortError') {
            addMessage('assistant', 'Request timed out after 5 minutes. Ollama may be processing a complex query. Please try a simpler question or wait and try again.');
            showStatus('Request timed out', 'error');
        } else if (error.message.includes('Failed to fetch') || error.message.includes('network error')) {
            addMessage('assistant', 'Network connection error. Please check your internet connection and try again.');
            showStatus('Connection error', 'error');
        } else if (error.message.includes('ERR_HTTP2_PROTOCOL_ERROR')) {
            addMessage('assistant', 'Ollama is still processing your request in the background. Please wait a moment and try again, or try a simpler query.');
            showStatus('Processing - please retry', 'warning');
        } else {
            addMessage('assistant', 'Connection error. Please check your network and try again.');
            showStatus('Connection error', 'error');
        }
    } finally {
        // Reset UI states
        sendBtn.disabled = false;
        sendBtn.innerHTML = 'Research';
        loadingIndicator.classList.remove('active');
        input.focus();
        console.log('Request completed');

        // Hide status after delay
        setTimeout(() => hideStatus(), 3000);
    }
}
210
+
211
function addMessage(sender, content, sources = [], visualizations = []) {
    // Append a chat bubble for `sender` ('user' | 'assistant'), optionally with
    // source chips and embedded visualization HTML (which may contain scripts),
    // then record the message in the rolling chatHistory window.
    const messagesDiv = document.getElementById('chatMessages');

    // Clear welcome message
    if (messageCount === 0) {
        messagesDiv.innerHTML = '';
    }
    messageCount++;

    const messageDiv = document.createElement('div');
    messageDiv.className = 'message ' + sender;

    let sourcesHtml = '';
    if (sources && sources.length > 0) {
        sourcesHtml = `
            <div class="sources">
                Sources: ${sources.map(s => `<span>${s}</span>`).join('')}
            </div>
        `;
    }

    let visualizationHtml = '';
    if (visualizations && visualizations.length > 0) {
        console.log('Processing visualizations:', visualizations.length);
        // Each visualization gets a uniquely-id'd container so its scripts can find it.
        visualizationHtml = visualizations.map((viz, index) => {
            console.log(`Visualization ${index}:`, viz.substring(0, 100));
            return `<div class="visualization-container" id="viz-${Date.now()}-${index}">${viz}</div>`;
        }).join('');
    }

    // Format content based on sender
    let formattedContent = content;
    if (sender === 'assistant') {
        // Convert markdown to HTML for assistant responses
        try {
            formattedContent = marked.parse(content);
        } catch (error) {
            // Fallback to basic formatting if marked.js fails
            console.warn('Markdown parsing failed, using fallback:', error);
            formattedContent = content
                .replace(/\n/g, '<br>')
                .replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>')
                .replace(/\*(.*?)\*/g, '<em>$1</em>')
                .replace(/`(.*?)`/g, '<code>$1</code>');
        }
    } else {
        // Apply markdown parsing to user messages too
        try {
            formattedContent = marked.parse(content);
        } catch (error) {
            formattedContent = content.replace(/\n/g, '<br>');
        }
    }

    messageDiv.innerHTML = `
        <div class="message-content">
            ${formattedContent}
            ${sourcesHtml}
        </div>
        ${visualizationHtml}
        <div class="message-meta">${new Date().toLocaleTimeString()}</div>
    `;

    messagesDiv.appendChild(messageDiv);
    // Keep the newest message in view.
    messagesDiv.scrollTop = messagesDiv.scrollHeight;

    // Execute any scripts in the visualizations after DOM insertion
    // (scripts injected via innerHTML do not run, so they must be re-executed).
    if (visualizations && visualizations.length > 0) {
        console.log('Executing visualization scripts...');
        setTimeout(() => {
            const scripts = messageDiv.querySelectorAll('script');
            console.log(`Found ${scripts.length} scripts to execute`);

            scripts.forEach((script, index) => {
                console.log(`Executing script ${index}:`, script.textContent.substring(0, 200) + '...');
                try {
                    // Execute script in global context using Function constructor
                    const scriptFunction = new Function(script.textContent);
                    scriptFunction.call(window);
                    console.log(`Script ${index} executed successfully`);
                } catch (error) {
                    console.error(`Script ${index} execution error:`, error);
                    console.error(`Script content preview:`, script.textContent.substring(0, 500));
                }
            });
            console.log('All visualization scripts executed');
        }, 100);
    }

    // Record the raw (unformatted) message; cap history at the last 20 entries.
    chatHistory.push({ role: sender, content });
    if (chatHistory.length > 20) chatHistory = chatHistory.slice(-20);
}
303
+
304
function setQuery(query) {
    // Pre-fill the input, then fire the query on a short delay so the
    // UI reflects the text before the request starts.
    const input = document.getElementById('queryInput');
    input.value = query;
    setTimeout(sendQuery, 100);
}
308
+
309
+ // Status management functions
310
// Status management: show the floating status pill with a message and severity.
function showStatus(message, type = 'info') {
    const indicator = document.getElementById('statusIndicator');
    const text = document.getElementById('statusText');
    text.textContent = message;
    // Severity class drives the pill's color; 'show' makes it visible.
    indicator.className = `status-indicator show ${type}`;
}
317
+
318
// Hide the floating status pill (keeps its last severity class).
function hideStatus() {
    document.getElementById('statusIndicator').classList.remove('show');
}
322
+
323
function updateProgress(progress) {
    // Reflect stream progress in the progress bar and loading caption,
    // if those elements are present in the DOM.
    const bar = document.querySelector('.progress-bar');
    if (bar) {
        bar.style.width = `${progress}%`;
    }

    const caption = document.getElementById('loadingText');
    if (caption && progress) {
        caption.textContent = `Processing ${progress}%...`;
    }
}
336
+
337
+ // Theme toggle functionality
338
// Flip between light and dark themes, persist the choice, and swap the icon.
function toggleTheme() {
    const root = document.documentElement;
    const next = root.getAttribute('data-theme') === 'light' ? 'dark' : 'light';

    root.setAttribute('data-theme', next);
    localStorage.setItem('theme', next);

    // Sun icon in light mode, moon icon in dark mode.
    const icon = document.querySelector('#themeToggle i');
    icon.className = next === 'light' ? 'fas fa-sun' : 'fas fa-moon';
}
353
+
354
+ // Initialize theme
355
// Apply the persisted theme on startup (defaults to dark) and set the icon.
function initializeTheme() {
    const theme = localStorage.getItem('theme') || 'dark';

    document.documentElement.setAttribute('data-theme', theme);

    const icon = document.querySelector('#themeToggle i');
    icon.className = theme === 'light' ? 'fas fa-sun' : 'fas fa-moon';
}
367
+
368
// Event listeners
document.getElementById('queryInput').addEventListener('keypress', (e) => {
    // Submit on Enter.
    if (e.key === 'Enter') sendQuery();
});

document.getElementById('sendBtn').addEventListener('click', (e) => {
    console.log('Research button clicked');
    // Prevent any default form submission/navigation before sending.
    e.preventDefault();
    sendQuery();
});

document.getElementById('themeToggle').addEventListener('click', toggleTheme);

// Initialize: apply theme, fetch backend status, focus the input.
document.addEventListener('DOMContentLoaded', () => {
    console.log('Application initialized');
    initializeTheme();
    checkStatus();
    document.getElementById('queryInput').focus();
});
static/styles.css ADDED
@@ -0,0 +1,767 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ :root {
2
+ --primary: #0066ff;
3
+ --primary-dark: #0052cc;
4
+ --accent: #00d4aa;
5
+ --background: #000000;
6
+ --surface: #111111;
7
+ --surface-elevated: #1a1a1a;
8
+ --text: #ffffff;
9
+ --text-secondary: #a0a0a0;
10
+ --text-muted: #666666;
11
+ --border: rgba(255, 255, 255, 0.08);
12
+ --border-focus: rgba(0, 102, 255, 0.3);
13
+ --shadow: rgba(0, 0, 0, 0.4);
14
+ --success: #00d4aa;
15
+ --warning: #ffa726;
16
+ --error: #f44336;
17
+ }
18
+
19
+ [data-theme="light"] {
20
+ --background: #ffffff;
21
+ --surface: #f8f9fa;
22
+ --surface-elevated: #ffffff;
23
+ --text: #1a1a1a;
24
+ --text-secondary: #4a5568;
25
+ --text-muted: #718096;
26
+ --border: rgba(0, 0, 0, 0.08);
27
+ --border-focus: rgba(0, 102, 255, 0.3);
28
+ --shadow: rgba(0, 0, 0, 0.1);
29
+ }
30
+
31
+ * {
32
+ margin: 0;
33
+ padding: 0;
34
+ box-sizing: border-box;
35
+ transition: background-color 0.3s ease, color 0.3s ease, border-color 0.3s ease;
36
+ }
37
+
38
+ body {
39
+ font-family: -apple-system, BlinkMacSystemFont, 'SF Pro Display', system-ui, sans-serif;
40
+ background: var(--background);
41
+ color: var(--text);
42
+ line-height: 1.5;
43
+ min-height: 100vh;
44
+ font-weight: 400;
45
+ -webkit-font-smoothing: antialiased;
46
+ -moz-osx-font-smoothing: grayscale;
47
+ }
48
+
49
+ .container {
50
+ max-width: 1000px;
51
+ margin: 0 auto;
52
+ padding: 2rem 1.5rem;
53
+ }
54
+
55
+ .header {
56
+ text-align: center;
57
+ margin-bottom: 2.5rem;
58
+ }
59
+ .header-content {
60
+ display: flex;
61
+ justify-content: space-between;
62
+ align-items: center;
63
+ max-width: 100%;
64
+ }
65
+ .header-text {
66
+ flex: 1;
67
+ text-align: center;
68
+ }
69
+ .header-controls {
70
+ display: flex;
71
+ align-items: center;
72
+ gap: 1rem;
73
+ }
74
+
75
+ /* LLM Toggle Switch Styles */
76
+ .llm-toggle {
77
+ display: flex;
78
+ align-items: center;
79
+ gap: 0.5rem;
80
+ }
81
+ .toggle-label {
82
+ font-size: 0.875rem;
83
+ color: var(--text-secondary);
84
+ font-weight: 500;
85
+ }
86
+
87
+ .switch {
88
+ position: relative;
89
+ display: inline-block;
90
+ width: 80px;
91
+ height: 32px;
92
+ }
93
+
94
+ .switch input {
95
+ opacity: 0;
96
+ width: 0;
97
+ height: 0;
98
+ }
99
+
100
+ .slider {
101
+ position: absolute;
102
+ cursor: pointer;
103
+ top: 0;
104
+ left: 0;
105
+ right: 0;
106
+ bottom: 0;
107
+ background-color: var(--surface);
108
+ border: 1px solid var(--border);
109
+ transition: .4s;
110
+ overflow: hidden;
111
+ }
112
+
113
+ .slider:before {
114
+ position: absolute;
115
+ content: "";
116
+ height: 24px;
117
+ width: 24px;
118
+ left: 3px;
119
+ bottom: 3px;
120
+ background-color: var(--primary);
121
+ transition: .4s;
122
+ border-radius: 50%;
123
+ z-index: 2;
124
+ }
125
+
126
+ .slider-text-off, .slider-text-on {
127
+ position: absolute;
128
+ color: var(--text-secondary);
129
+ font-size: 0.7rem;
130
+ font-weight: 500;
131
+ top: 50%;
132
+ transform: translateY(-50%);
133
+ transition: .4s;
134
+ pointer-events: none;
135
+ z-index: 1;
136
+ }
137
+
138
+ .slider-text-off {
139
+ left: 8px;
140
+ }
141
+
142
+ .slider-text-on {
143
+ right: 8px;
144
+ opacity: 0;
145
+ }
146
+
147
+ input:checked + .slider {
148
+ background-color: var(--accent);
149
+ border-color: var(--accent);
150
+ }
151
+
152
+ input:checked + .slider .slider-text-off {
153
+ opacity: 0;
154
+ }
155
+
156
+ input:checked + .slider .slider-text-on {
157
+ opacity: 1;
158
+ }
159
+
160
+ input:checked + .slider:before {
161
+ transform: translateX(48px);
162
+ }
163
+
164
+ .slider.round {
165
+ border-radius: 20px;
166
+ }
167
+
168
+ .slider.round:before {
169
+ border-radius: 50%;
170
+ }
171
+
172
+ .theme-toggle {
173
+ background: var(--surface);
174
+ border: 1px solid var(--border);
175
+ border-radius: 8px;
176
+ padding: 0.75rem;
177
+ color: var(--text);
178
+ cursor: pointer;
179
+ transition: all 0.2s ease;
180
+ font-size: 1.1rem;
181
+ min-width: 44px;
182
+ height: 44px;
183
+ display: flex;
184
+ align-items: center;
185
+ justify-content: center;
186
+ }
187
+ .theme-toggle:hover {
188
+ background: var(--surface-elevated);
189
+ border-color: var(--primary);
190
+ transform: translateY(-1px);
191
+ }
192
+
193
+ .header h1 {
194
+ font-size: 2.25rem;
195
+ font-weight: 600;
196
+ color: var(--text);
197
+ margin-bottom: 0.5rem;
198
+ letter-spacing: -0.025em;
199
+ }
200
+
201
+ .header .brand {
202
+ color: var(--primary);
203
+ }
204
+
205
+ .header p {
206
+ color: var(--text-secondary);
207
+ font-size: 1rem;
208
+ font-weight: 400;
209
+ }
210
+
211
+ .status {
212
+ background: var(--surface);
213
+ border: 1px solid var(--border);
214
+ border-radius: 12px;
215
+ padding: 1rem 1.5rem;
216
+ margin-bottom: 2rem;
217
+ text-align: center;
218
+ transition: all 0.2s ease;
219
+ }
220
+
221
+ .status.online {
222
+ border-color: var(--success);
223
+ background: linear-gradient(135deg, rgba(0, 212, 170, 0.05), rgba(0, 212, 170, 0.02));
224
+ }
225
+
226
+ .status.offline {
227
+ border-color: var(--error);
228
+ background: linear-gradient(135deg, rgba(244, 67, 54, 0.05), rgba(244, 67, 54, 0.02));
229
+ }
230
+
231
+ .status.checking {
232
+ border-color: var(--warning);
233
+ background: linear-gradient(135deg, rgba(255, 167, 38, 0.05), rgba(255, 167, 38, 0.02));
234
+ animation: pulse 2s infinite;
235
+ }
236
+
237
+ @keyframes pulse {
238
+ 0%, 100% { opacity: 1; }
239
+ 50% { opacity: 0.8; }
240
+ }
241
+
242
+ .chat-interface {
243
+ background: var(--surface);
244
+ border: 1px solid var(--border);
245
+ border-radius: 16px;
246
+ overflow: hidden;
247
+ margin-bottom: 2rem;
248
+ backdrop-filter: blur(20px);
249
+ }
250
+
251
+ .chat-messages {
252
+ height: 480px;
253
+ overflow-y: auto;
254
+ padding: 2rem;
255
+ background: linear-gradient(180deg, var(--background), var(--surface));
256
+ }
257
+
258
+ .chat-messages::-webkit-scrollbar {
259
+ width: 3px;
260
+ }
261
+
262
+ .chat-messages::-webkit-scrollbar-track {
263
+ background: transparent;
264
+ }
265
+
266
+ .chat-messages::-webkit-scrollbar-thumb {
267
+ background: var(--border);
268
+ border-radius: 2px;
269
+ }
270
+
271
+ .message {
272
+ margin-bottom: 2rem;
273
+ opacity: 0;
274
+ animation: messageSlide 0.4s cubic-bezier(0.2, 0, 0.2, 1) forwards;
275
+ }
276
+
277
+ @keyframes messageSlide {
278
+ from {
279
+ opacity: 0;
280
+ transform: translateY(20px) scale(0.98);
281
+ }
282
+ to {
283
+ opacity: 1;
284
+ transform: translateY(0) scale(1);
285
+ }
286
+ }
287
+
288
+ .message.user {
289
+ text-align: right;
290
+ }
291
+
292
+ .message.assistant {
293
+ text-align: left;
294
+ }
295
+
296
+ .message-content {
297
+ display: inline-block;
298
+ max-width: 75%;
299
+ padding: 1.25rem 1.5rem;
300
+ border-radius: 24px;
301
+ font-size: 0.95rem;
302
+ line-height: 1.6;
303
+ position: relative;
304
+ }
305
+
306
+ .message.user .message-content {
307
+ background: linear-gradient(135deg, var(--primary), var(--primary-dark));
308
+ color: #ffffff;
309
+ border-bottom-right-radius: 8px;
310
+ box-shadow: 0 4px 12px rgba(0, 102, 255, 0.2);
311
+ }
312
+
313
+ .message.assistant .message-content {
314
+ background: var(--surface-elevated);
315
+ color: var(--text);
316
+ border-bottom-left-radius: 8px;
317
+ border: 1px solid var(--border);
318
+ }
319
+ .message-content h1, .message-content h2, .message-content h3, .message-content h4 {
320
+ color: var(--accent);
321
+ margin: 1.25rem 0 0.5rem 0;
322
+ font-weight: 600;
323
+ line-height: 1.3;
324
+ text-shadow: 0 1px 2px rgba(0, 212, 170, 0.1);
325
+ }
326
+ .message-content h1 {
327
+ font-size: 1.35rem;
328
+ background: linear-gradient(135deg, var(--accent), #00b894);
329
+ -webkit-background-clip: text;
330
+ -webkit-text-fill-color: transparent;
331
+ background-clip: text;
332
+ }
333
/* ---- Assistant message typography -------------------------------------- */
.message-content h2 {
    font-size: 1.2rem;
    color: #00b894;
}
.message-content h3 {
    font-size: 1.05rem;
    color: var(--accent);
}
.message-content h4 {
    font-size: 0.95rem;
    color: #74b9ff;
}
.message-content p {
    margin: 0.75rem 0;
    line-height: 1.65;
    color: var(--text);
}

/* Lists: accent-coloured bullets; numbered markers additionally bolded. */
.message-content ul, .message-content ol {
    margin: 0.75rem 0;
    padding-left: 1.5rem;
    line-height: 1.6;
}
.message-content li {
    margin: 0.3rem 0;
    line-height: 1.6;
    position: relative;
}
.message-content ul li::marker {
    color: var(--accent);
}
.message-content ol li::marker {
    color: var(--accent);
    font-weight: 600;
}

/* Tables rendered from markdown answers. */
.message-content table {
    width: 100%;
    border-collapse: collapse;
    margin: 1rem 0;
    font-size: 0.9rem;
}
.message-content th, .message-content td {
    border: 1px solid var(--border);
    padding: 0.5rem 0.75rem;
    text-align: left;
}
.message-content th {
    background: var(--surface);
    font-weight: 600;
    color: var(--accent);
}

/* Inline emphasis: gradient-filled bold, tinted italics, pill-style code. */
.message-content strong {
    background: linear-gradient(135deg, var(--accent), #74b9ff);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
    font-weight: 700;
    text-shadow: 0 1px 2px rgba(0, 212, 170, 0.2);
}
.message-content em {
    color: #a29bfe;
    font-style: italic;
    background: rgba(162, 155, 254, 0.1);
    padding: 0.1rem 0.2rem;
    border-radius: 3px;
}
.message-content code {
    background: linear-gradient(135deg, rgba(116, 185, 255, 0.15), rgba(0, 212, 170, 0.1));
    border: 1px solid rgba(116, 185, 255, 0.3);
    padding: 0.2rem 0.5rem;
    border-radius: 6px;
    font-family: 'SF Mono', Consolas, 'Courier New', monospace;
    font-size: 0.85rem;
    color: #74b9ff;
    font-weight: 600;
    text-shadow: 0 1px 2px rgba(116, 185, 255, 0.2);
}

/* User bubbles have a dark background: force light inline emphasis. */
.message.user .message-content strong,
.message.user .message-content code,
.message.user .message-content em {
    color: rgba(255, 255, 255, 0.95);
    background: rgba(255, 255, 255, 0.1);
    -webkit-text-fill-color: rgba(255, 255, 255, 0.95);
}

/* Fenced code blocks; inner <code> drops the inline pill styling. */
.message-content pre {
    background: var(--background);
    border: 1px solid var(--border);
    border-radius: 8px;
    padding: 1rem;
    margin: 1rem 0;
    overflow-x: auto;
    font-family: 'SF Mono', Consolas, 'Courier New', monospace;
    font-size: 0.85rem;
    line-height: 1.5;
}
.message-content pre code {
    background: none;
    border: none;
    padding: 0;
    font-size: inherit;
}

.message-content blockquote {
    border-left: 3px solid var(--accent);
    padding-left: 1rem;
    margin: 1rem 0;
    color: var(--text-secondary);
    font-style: italic;
    background: rgba(0, 212, 170, 0.05);
    padding: 0.75rem 0 0.75rem 1rem;
    border-radius: 0 4px 4px 0;
}

.message-content a {
    color: var(--accent);
    text-decoration: none;
    border-bottom: 1px solid transparent;
    transition: border-color 0.2s ease;
}
.message-content a:hover {
    border-bottom-color: var(--accent);
}

/* Preserve the user's own line breaks; wrap long tokens. */
.message.user .message-content {
    word-wrap: break-word;
    white-space: pre-wrap;
}
.message.user .message-content strong,
.message.user .message-content code {
    color: rgba(255, 255, 255, 0.9);
}
460

/* ---- Message metadata and source chips --------------------------------- */
.message-meta {
    font-size: 0.75rem;
    color: var(--text-muted);
    margin-top: 0.5rem;
    font-weight: 500;
}

.sources {
    margin-top: 1rem;
    padding-top: 1rem;
    border-top: 1px solid var(--border);
    font-size: 0.8rem;
    color: var(--text-secondary);
}

.sources span {
    display: inline-block;
    background: rgba(0, 102, 255, 0.1);
    border: 1px solid rgba(0, 102, 255, 0.2);
    padding: 0.25rem 0.75rem;
    border-radius: 6px;
    margin: 0.25rem 0.5rem 0.25rem 0;
    font-weight: 500;
    font-size: 0.75rem;
}

/* ---- Query input row --------------------------------------------------- */
.input-area {
    padding: 2rem;
    background: linear-gradient(180deg, var(--surface), var(--surface-elevated));
    border-top: 1px solid var(--border);
}

.input-container {
    display: flex;
    gap: 1rem;
    align-items: stretch;
}

.input-field {
    flex: 1;
    padding: 1rem 1.5rem;
    background: var(--background);
    border: 2px solid var(--border);
    border-radius: 28px;
    color: var(--text);
    font-size: 0.95rem;
    outline: none;
    transition: all 0.2s cubic-bezier(0.2, 0, 0.2, 1);
    font-weight: 400;
}

.input-field:focus {
    border-color: var(--primary);
    box-shadow: 0 0 0 4px var(--border-focus);
    background: var(--surface);
}

.input-field::placeholder {
    color: var(--text-muted);
    font-weight: 400;
}

.send-button {
    padding: 1rem 2rem;
    background: linear-gradient(135deg, var(--primary), var(--primary-dark));
    color: #ffffff;
    border: none;
    border-radius: 28px;
    font-weight: 600;
    cursor: pointer;
    transition: all 0.2s cubic-bezier(0.2, 0, 0.2, 1);
    font-size: 0.95rem;
    box-shadow: 0 4px 12px rgba(0, 102, 255, 0.2);
}

/* Lift on hover only while enabled; flatten again on press. */
.send-button:hover:not(:disabled) {
    transform: translateY(-2px);
    box-shadow: 0 8px 24px rgba(0, 102, 255, 0.3);
}

.send-button:active {
    transform: translateY(0);
}

.send-button:disabled {
    opacity: 0.6;
    cursor: not-allowed;
    transform: none;
    box-shadow: 0 4px 12px rgba(0, 102, 255, 0.1);
}
551

/* ---- Example query cards ----------------------------------------------- */
.examples {
    display: grid;
    grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
    gap: 1rem;
    margin-top: 1rem;
}

.example {
    background: linear-gradient(135deg, var(--surface), var(--surface-elevated));
    border: 1px solid var(--border);
    border-radius: 12px;
    padding: 1.5rem;
    cursor: pointer;
    transition: all 0.3s cubic-bezier(0.2, 0, 0.2, 1);
    position: relative;
    overflow: hidden;
}

/* Sheen effect: a translucent band parked off-canvas left... */
.example::before {
    content: '';
    position: absolute;
    top: 0;
    left: -100%;
    width: 100%;
    height: 100%;
    background: linear-gradient(90deg, transparent, rgba(0, 102, 255, 0.05), transparent);
    transition: left 0.5s ease;
}

/* ...that sweeps across the card on hover. */
.example:hover::before {
    left: 100%;
}

.example:hover {
    border-color: var(--primary);
    transform: translateY(-4px);
    box-shadow: 0 12px 32px rgba(0, 0, 0, 0.2);
    background: linear-gradient(135deg, var(--surface-elevated), var(--surface));
}

.example-title {
    font-weight: 600;
    color: var(--text);
    margin-bottom: 0.5rem;
    font-size: 0.95rem;
    display: flex;
    align-items: center;
    gap: 0.5rem;
}
.example-title i {
    color: var(--primary);
    font-size: 1rem;
    width: 20px;
    text-align: center;
}

.example-desc {
    font-size: 0.85rem;
    color: var(--text-secondary);
    font-weight: 400;
}
613

/* ---- Loading, progress, and status feedback ---------------------------- */
.loading {
    display: inline-flex;
    align-items: center;
    gap: 0.5rem;
    color: var(--text-secondary);
    font-weight: 500;
}

/* Small inline spinner appended after the loading label. */
.loading::after {
    content: '';
    width: 14px;
    height: 14px;
    border: 2px solid currentColor;
    border-top-color: transparent;
    border-radius: 50%;
    animation: spin 1s linear infinite;
}

@keyframes spin {
    to { transform: rotate(360deg); }
}

/* Hidden by default; script toggles .active while a query is running. */
.loading-indicator {
    display: none;
    background: var(--surface-elevated);
    border: 1px solid var(--border);
    border-radius: 12px;
    padding: 1.5rem;
    margin: 1rem 0;
    text-align: center;
    color: var(--text-secondary);
}
.loading-indicator.active {
    display: block;
}
.loading-spinner {
    display: inline-block;
    width: 20px;
    height: 20px;
    border: 2px solid var(--border);
    border-top-color: var(--primary);
    border-radius: 50%;
    animation: spin 1s linear infinite;
    margin-right: 0.5rem;
}
.progress-container {
    width: 100%;
    height: 4px;
    background: var(--border);
    border-radius: 2px;
    overflow: hidden;
    margin: 10px 0 0 0;
}
.progress-bar {
    height: 100%;
    background: linear-gradient(90deg, var(--primary), var(--accent));
    border-radius: 2px;
    transition: width 0.3s ease;
    width: 0%;
}

/* Fixed toast in the top-right corner; script adds .show / .processing. */
.status-indicator {
    position: fixed;
    top: 20px;
    right: 20px;
    background: var(--surface);
    border: 1px solid var(--border);
    border-radius: 8px;
    padding: 0.75rem 1rem;
    font-size: 0.85rem;
    color: var(--text-secondary);
    opacity: 0;
    transform: translateY(-10px);
    transition: all 0.3s ease;
    z-index: 1000;
}
.status-indicator.show {
    opacity: 1;
    transform: translateY(0);
}
.status-indicator.processing {
    border-color: var(--primary);
    background: linear-gradient(135deg, rgba(0, 102, 255, 0.05), rgba(0, 102, 255, 0.02));
}
696

/* ---- Visualization panel and empty-chat welcome ------------------------ */
.visualization-container {
    margin: 1.5rem 0;
    background: var(--surface-elevated);
    border-radius: 12px;
    padding: 1.5rem;
    border: 1px solid var(--border);
}

.welcome {
    text-align: center;
    padding: 4rem 2rem;
    color: var(--text-secondary);
}

.welcome h3 {
    font-size: 1.25rem;
    font-weight: 600;
    margin-bottom: 0.5rem;
    color: var(--text);
}

.welcome p {
    font-size: 0.95rem;
    font-weight: 400;
}

/* ---- Narrow-screen layout ---------------------------------------------- */
@media (max-width: 768px) {
    .container {
        padding: 1rem;
    }

    .header-content {
        flex-direction: column;
        gap: 1rem;
    }

    .header-text {
        text-align: center;
    }

    .header h1 {
        font-size: 1.75rem;
    }

    .chat-messages {
        height: 400px;
        padding: 1.5rem;
    }

    .message-content {
        max-width: 85%;
        padding: 1rem 1.25rem;
    }

    .input-area {
        padding: 1.5rem;
    }

    .input-container {
        flex-direction: column;
        gap: 0.75rem;
    }

    .send-button {
        align-self: stretch;
    }

    .examples {
        grid-template-columns: 1fr;
    }
}
templates/index.html ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Web3 Research Co-Pilot</title>
    <link rel="icon" href="data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 24 24%22><path fill=%22%2300d4aa%22 d=%22M12 2L2 7v10c0 5.5 3.8 7.7 9 9 5.2-1.3 9-3.5 9-9V7l-10-5z%22/></svg>">
    <!-- marked.js renders the agent's markdown answers client-side -->
    <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
    <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css" rel="stylesheet">
    <link rel="stylesheet" href="/static/styles.css">
</head>
<body>
    <!-- Transient toast showing backend processing state (see app.js) -->
    <div id="statusIndicator" class="status-indicator">
        <span id="statusText">Ready</span>
    </div>

    <div class="container">
        <div class="header">
            <div class="header-content">
                <div class="header-text">
                    <h1><span class="brand">Web3</span> Research Co-Pilot</h1>
                    <p>Professional cryptocurrency analysis and market intelligence</p>
                </div>
                <div class="header-controls">
                    <!-- Toggle between the local Ollama model and cloud Gemini -->
                    <div class="llm-toggle">
                        <span class="toggle-label">AI Model:</span>
                        <label class="switch">
                            <input type="checkbox" id="geminiToggle" title="Switch between Ollama (Local) and Gemini (Cloud)">
                            <span class="slider round">
                                <span class="slider-text-off">Ollama</span>
                                <span class="slider-text-on">Gemini</span>
                            </span>
                        </label>
                    </div>
                    <button id="themeToggle" class="theme-toggle" title="Toggle theme">
                        <i class="fas fa-moon"></i>
                    </button>
                </div>
            </div>
        </div>

        <!-- Health banner; replaced by app.js once the backend responds -->
        <div id="status" class="status checking">
            <span>Initializing research systems...</span>
        </div>

        <div class="chat-interface">
            <div id="chatMessages" class="chat-messages">
                <div class="welcome">
                    <h3>Welcome to Web3 Research Co-Pilot</h3>
                    <p>Ask about market trends, DeFi protocols, or blockchain analytics</p>
                </div>
            </div>
            <div id="loadingIndicator" class="loading-indicator">
                <div class="loading-spinner"></div>
                <span id="loadingText">Processing your research query...</span>
                <div class="progress-container">
                    <div class="progress-bar" style="width: 0%;"></div>
                </div>
            </div>
            <div class="input-area">
                <div class="input-container">
                    <input
                        type="text"
                        id="queryInput"
                        class="input-field"
                        placeholder="Research Bitcoin trends, analyze DeFi yields, compare protocols..."
                        maxlength="500"
                    >
                    <button id="sendBtn" class="send-button">Research</button>
                </div>
            </div>
        </div>

        <!-- Clickable sample queries; setQuery() is defined in app.js -->
        <div class="examples">
            <div class="example" onclick="setQuery('Analyze Bitcoin price trends and institutional adoption patterns')">
                <div class="example-title"><i class="fas fa-chart-line"></i> Market Analysis</div>
                <div class="example-desc">Bitcoin trends, institutional flows, and market sentiment analysis</div>
            </div>
            <div class="example" onclick="setQuery('Compare top DeFi protocols by TVL, yield, and risk metrics across chains')">
                <div class="example-title"><i class="fas fa-coins"></i> DeFi Intelligence</div>
                <div class="example-desc">Protocol comparison, yield analysis, and cross-chain opportunities</div>
            </div>
            <div class="example" onclick="setQuery('Evaluate Ethereum Layer 2 scaling solutions and adoption metrics')">
                <div class="example-title"><i class="fas fa-layer-group"></i> Layer 2 Research</div>
                <div class="example-desc">Scaling solutions, transaction costs, and ecosystem growth</div>
            </div>
            <div class="example" onclick="setQuery('Find optimal yield farming strategies with risk assessment')">
                <div class="example-title"><i class="fas fa-seedling"></i> Yield Optimization</div>
                <div class="example-desc">Cross-chain opportunities, APY tracking, and risk analysis</div>
            </div>
            <div class="example" onclick="setQuery('Track whale movements and large Bitcoin transactions today')">
                <div class="example-title"><i class="fas fa-fish"></i> Whale Tracking</div>
                <div class="example-desc">Large transactions, wallet analysis, and market impact</div>
            </div>
            <div class="example" onclick="setQuery('Analyze gas fees and network congestion across blockchains')">
                <div class="example-title"><i class="fas fa-tachometer-alt"></i> Network Analytics</div>
                <div class="example-desc">Gas prices, network utilization, and cost comparisons</div>
            </div>
        </div>
    </div>

    <script src="/static/app.js"></script>
</body>
</html>
test_chart_tool.py ADDED
File without changes
test_complete_pipeline.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Complete pipeline test for Web3 Research Agent with Ollama fallback
4
+ Tests the entire flow: API calls → LLM processing → Response generation
5
+ """
6
+
7
+ import asyncio
8
+ import sys
9
+ import os
10
+ sys.path.append('.')
11
+
12
async def test_complete_pipeline():
    """Exercise the full research pipeline end to end, printing a report.

    Runs four stages: (1) agent construction, (2) Ollama connectivity and a
    direct inference probe, (3) each data tool in isolation, (4) a complete
    research query forced onto the Ollama fallback by invalidating the
    Gemini key. Returns False only when the agent cannot be constructed at
    all; every other failure is reported but does not abort the run.

    Fix over the original: GEMINI_API_KEY is restored in a ``finally``
    block, so an exception inside stage 4 can no longer leak the bogus
    testing key into the rest of the process.
    """
    print("🧪 Testing Complete Web3 Research Pipeline with Ollama Fallback")
    print("=" * 60)

    # Test 1: Initialize the research agent
    print("\n1️⃣ Testing Research Agent Initialization...")
    try:
        from src.agent.research_agent import Web3ResearchAgent
        agent = Web3ResearchAgent()

        if agent.enabled:
            print("✅ Primary LLM (Gemini) initialized successfully")
        else:
            print("⚠️ Primary LLM failed, will test Ollama fallback")

        print(f"✅ Agent initialized with {len(agent.tools)} tools")
        for tool in agent.tools:
            print(f"   - {tool.name}")

    except Exception as e:
        # Without an agent nothing else can run, so bail out early.
        print(f"❌ Agent initialization failed: {e}")
        return False

    # Test 2: Test Ollama connection (non-fatal if the daemon is down)
    print("\n2️⃣ Testing Ollama Connection...")
    try:
        import requests
        response = requests.get("http://localhost:11434/api/tags", timeout=5)
        if response.status_code == 200:
            models = response.json().get("models", [])
            print(f"✅ Ollama connected. Available models: {[m['name'] for m in models]}")

            # Test direct Ollama inference
            test_response = requests.post(
                "http://localhost:11434/api/generate",
                json={
                    "model": "llama3.1:8b",
                    "prompt": "What is DeFi in one sentence?",
                    "stream": False
                },
                timeout=30
            )

            if test_response.status_code == 200:
                result = test_response.json()
                print(f"✅ Ollama inference test: {result['response'][:100]}...")
            else:
                print(f"❌ Ollama inference failed: {test_response.status_code}")

        else:
            print(f"❌ Ollama connection failed: {response.status_code}")

    except Exception as e:
        print(f"❌ Ollama test failed: {e}")

    # Test 3: Test API integrations, one tool at a time so a single
    # failing upstream API does not mask the others.
    print("\n3️⃣ Testing API Integrations...")

    # Test DeFiLlama
    try:
        from src.tools.defillama_tool import DeFiLlamaTool
        defillama = DeFiLlamaTool()
        result = await defillama._arun("top 3 defi protocols")
        if result and "⚠️" not in result:
            print(f"✅ DeFiLlama API: {result[:80]}...")
        else:
            print(f"⚠️ DeFiLlama API: {result[:80]}...")
    except Exception as e:
        print(f"❌ DeFiLlama test failed: {e}")

    # Test CoinGecko
    try:
        from src.tools.coingecko_tool import CoinGeckoTool
        coingecko = CoinGeckoTool()
        result = await coingecko._arun("bitcoin price")
        if result and "⚠️" not in result:
            print(f"✅ CoinGecko API: {result[:80]}...")
        else:
            print(f"⚠️ CoinGecko API: {result[:80]}...")
    except Exception as e:
        print(f"❌ CoinGecko test failed: {e}")

    # Test Chart Data
    try:
        from src.tools.chart_data_tool import ChartDataTool
        chart_tool = ChartDataTool()
        result = await chart_tool._arun("price_chart", "bitcoin", "7d")
        if result and len(result) > 100:
            print(f"✅ Chart Data: Generated {len(result)} chars of chart data")
        else:
            print(f"⚠️ Chart Data: {result[:80]}...")
    except Exception as e:
        print(f"❌ Chart Data test failed: {e}")

    # Test 4: Test complete research query on the Ollama fallback.
    print("\n4️⃣ Testing Complete Research Query...")
    # Force the fallback by making the Gemini key invalid. The original
    # restored the key at the end of the try block, so any exception left
    # the bogus key in os.environ; restoring in `finally` guarantees it.
    original_key = os.environ.get('GEMINI_API_KEY')
    os.environ['GEMINI_API_KEY'] = 'invalid_key_for_testing'
    try:
        # Reinitialize agent to trigger fallback
        agent_fallback = Web3ResearchAgent()

        if agent_fallback.fallback_llm and agent_fallback.ollama_available:
            print("✅ Ollama fallback initialized successfully")

            # Test with simple query first
            simple_result = await agent_fallback.research_query(
                "What is Bitcoin? Give a brief answer."
            )

            if simple_result and simple_result.get('success'):
                response_text = simple_result.get('result', simple_result.get('response', 'No response text'))
                llm_used = simple_result.get('metadata', {}).get('llm_used', 'Unknown')
                print(f"✅ Query successful with {llm_used}: {response_text[:100]}...")

                # Now test with Web3 data integration
                web3_result = await agent_fallback.research_query(
                    "Get Bitcoin price and explain current market trends"
                )

                if web3_result and web3_result.get('success'):
                    web3_response = web3_result.get('result', web3_result.get('response', 'No response text'))
                    web3_llm = web3_result.get('metadata', {}).get('llm_used', 'Unknown')
                    print(f"✅ Web3 integration with {web3_llm}: {web3_response[:100]}...")
                    print(f"   Sources: {web3_result.get('sources', [])}")
                    visualizations = web3_result.get('visualizations', web3_result.get('metadata', {}).get('visualizations', []))
                    print(f"   Visualizations: {len(visualizations)}")
                else:
                    print(f"⚠️ Web3 integration: {web3_result}")

            else:
                print(f"❌ Query failed: {simple_result}")
        else:
            print("❌ Ollama fallback initialization failed")

    except Exception as e:
        print(f"❌ Complete query test failed: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # Always restore the caller's key, even when stage 4 blew up.
        if original_key is not None:
            os.environ['GEMINI_API_KEY'] = original_key
        else:
            os.environ.pop('GEMINI_API_KEY', None)

    print("\n" + "=" * 60)
    print("🏁 Pipeline Test Complete!")

    return True
164
+
165
if __name__ == "__main__":
    # Script entry point: drive the async pipeline test to completion.
    asyncio.run(test_complete_pipeline())
test_response_clean.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Quick test to verify response cleaning works properly
4
+ """
5
+
6
+ import asyncio
7
+ import sys
8
+ import os
9
+
10
+ # Add src to path
11
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
12
+
13
+ from src.agent.research_agent import Web3ResearchAgent
14
+
15
async def test_response_cleaning():
    """Verify that agent responses carry no raw LangChain metadata.

    Runs a single Gemini-backed query and scans the returned payload for
    serializer leak markers (``additional_kwargs`` / ``response_metadata``).
    Returns True when the response looks clean, False otherwise.
    """
    print("🧪 Testing response cleaning...")

    copilot = Web3ResearchAgent()
    if not copilot.enabled:
        print("❌ Agent not enabled")
        return False

    try:
        print("📊 Testing simple Bitcoin price query...")
        outcome = await copilot.research_query("What is Bitcoin current price?", use_gemini=True)

        if not outcome['success']:
            print(f"❌ Query failed: {outcome.get('error', 'Unknown error')}")
            return False

        payload = outcome['result']
        print("✅ Query successful!")
        print(f"📈 Response type: {type(payload)}")
        print(f"📄 Response preview: {payload[:200]}...")

        # These substrings only appear when a raw LangChain message object
        # was stringified instead of being reduced to its content.
        rendered = str(payload)
        if "additional_kwargs" in rendered or "response_metadata" in rendered:
            print("❌ Response contains LangChain metadata - not properly cleaned")
            return False

        print("✅ Response properly cleaned - no LangChain metadata found")
        return True

    except Exception as e:
        print(f"❌ Test failed with exception: {e}")
        return False
49
+
50
async def main():
    """Run the cleaning check and translate its outcome into an exit code."""
    passed = await test_response_cleaning()
    if not passed:
        print("\n❌ Response cleaning test failed!")
        return 1
    print("\n🎉 Response cleaning test passed!")
    return 0
58
+
59
if __name__ == "__main__":
    # Propagate the test outcome (0 = pass, 1 = fail) as the process exit code.
    exit_code = asyncio.run(main())
    sys.exit(exit_code)
test_tool_selection.py ADDED
File without changes
validate_startup.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Comprehensive startup validation script for Web3 Research Co-Pilot
4
+ Validates syntax, imports, and configurations before application startup
5
+ """
6
+
7
+ import ast
8
+ import sys
9
+ import os
10
+ import json
11
+ import importlib.util
12
+ from pathlib import Path
13
+ from typing import List, Dict, Any, Tuple
14
+
15
def validate_python_syntax(file_path: str) -> Tuple[bool, str]:
    """Parse-check a single Python source file.

    Args:
        file_path: Path of the file to check.

    Returns:
        ``(True, "OK")`` when the file parses cleanly, otherwise
        ``(False, reason)`` describing the syntax or read failure.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            code_text = handle.read()
        ast.parse(code_text)
    except SyntaxError as err:
        return False, f"Syntax error at line {err.lineno}: {err.msg}"
    except Exception as err:
        # Unreadable file, bad encoding, etc.
        return False, f"Parse error: {str(err)}"
    return True, "OK"
27
+
28
def validate_imports(file_path: str) -> Tuple[bool, List[str]]:
    """Check that every absolute import in *file_path* is resolvable.

    Each ``import X`` / ``from X import Y`` is probed with
    :func:`importlib.import_module`. Fixes over the original:

    * Relative imports (``node.level > 0``) are skipped — they can only be
      resolved relative to their package, so probing them as absolute names
      produced false failures.
    * Any exception from the probe is treated as unresolvable, not just
      ``ImportError`` — a module's import-time side effects can raise
      anything. NOTE: probing executes module top-level code.

    Args:
        file_path: Path of the Python file whose imports are checked.

    Returns:
        ``(ok, issues)`` where *ok* is True iff no issues were found.
    """
    issues: List[str] = []
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            source = f.read()

        tree = ast.parse(source)

        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    try:
                        importlib.import_module(alias.name)
                    except Exception:
                        issues.append(f"Cannot import: {alias.name}")

            elif isinstance(node, ast.ImportFrom):
                # node.module is None for bare ``from . import x``;
                # node.level > 0 marks a relative import — skip both.
                if node.module and node.level == 0:
                    try:
                        importlib.import_module(node.module)
                    except Exception:
                        issues.append(f"Cannot import module: {node.module}")

        return len(issues) == 0, issues
    except Exception as e:
        # File unreadable or syntactically invalid.
        return False, [f"Import validation error: {str(e)}"]
55
+
56
def validate_json_files() -> Tuple[bool, List[str]]:
    """Validate every ``*.json`` file in the current working directory.

    Fix over the original: the hard-coded candidate list contained only
    TOML/YAML files, which the ``endswith('.json')`` filter always
    rejected, so the function never validated anything. Globbing for
    actual JSON files makes the check do real work while keeping the same
    ``(ok, issues)`` interface.

    Returns:
        ``(ok, issues)`` where *ok* is True iff every JSON file parsed.
    """
    issues: List[str] = []

    # sorted() gives a deterministic report order.
    for file_path in sorted(Path('.').glob('*.json')):
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                json.load(f)
        except json.JSONDecodeError as e:
            issues.append(f"Invalid JSON in {file_path}: {str(e)}")
        except Exception as e:
            issues.append(f"Error reading {file_path}: {str(e)}")

    return len(issues) == 0, issues
75
+
76
def validate_environment_variables() -> Tuple[bool, List[str]]:
    """Report which optional API-key variables are unset.

    Every key listed here is optional — the application falls back to the
    providers' free tiers — so the boolean result is always True and the
    returned list is purely informational.
    """
    optional_vars = (
        "CRYPTOCOMPARE_API_KEY",
        "ETHERSCAN_API_KEY",
        "COINGECKO_API_KEY",
    )
    notes = [
        f"Optional environment variable {name} not set (will use free tier)"
        for name in optional_vars
        if not os.getenv(name)
    ]
    return True, notes  # All env vars are optional
90
+
91
def main():
    """Run comprehensive startup validation and exit with 0/1.

    Four passes: Python syntax (blocking), critical imports (warnings
    only — the app has runtime fallbacks), JSON config files (blocking),
    and environment variables (informational). Fix over the original:
    the "Configuration Issues" header printed mojibake ("��") instead of
    the intended ❌ marker.
    """
    print("🔍 Starting comprehensive validation...")

    # Collect every Python file in the tree, skipping bytecode caches.
    python_files = []
    for root, dirs, files in os.walk("."):
        # Skip __pycache__ directories
        dirs[:] = [d for d in dirs if d != "__pycache__"]

        for file in files:
            if file.endswith(".py"):
                python_files.append(os.path.join(root, file))

    print(f"📁 Found {len(python_files)} Python files to validate")

    all_valid = True

    # 1. Syntax validation — any failure here blocks startup.
    print("\n1️⃣ Validating Python syntax...")
    syntax_issues = []
    for file_path in python_files:
        is_valid, message = validate_python_syntax(file_path)
        if not is_valid:
            syntax_issues.append(f"{file_path}: {message}")
            all_valid = False
        else:
            print(f"   ✅ {file_path}")

    if syntax_issues:
        print("❌ Syntax Issues Found:")
        for issue in syntax_issues:
            print(f"   - {issue}")

    # 2. Critical imports validation (only for main files)
    print("\n2️⃣ Validating critical imports...")
    critical_files = ["app.py", "src/agent/research_agent.py"]
    import_issues = []

    for file_path in critical_files:
        if os.path.exists(file_path):
            is_valid, issues = validate_imports(file_path)
            if not is_valid:
                for issue in issues:
                    import_issues.append(f"{file_path}: {issue}")
            else:
                print(f"   ✅ {file_path}")

    # Import problems are warnings, not blockers: the app has fallbacks.
    if import_issues:
        print("⚠️ Import Warnings (may use fallbacks):")
        for issue in import_issues:
            print(f"   - {issue}")

    # 3. JSON validation
    print("\n3️⃣ Validating configuration files...")
    json_valid, json_issues = validate_json_files()
    if not json_valid:
        # Fixed: this header was garbled ("��") in the original source.
        print("❌ Configuration Issues:")
        for issue in json_issues:
            print(f"   - {issue}")
        all_valid = False
    else:
        print("   ✅ Configuration files valid")

    # 4. Environment validation (informational only; never blocks)
    print("\n4️⃣ Validating environment...")
    env_valid, env_issues = validate_environment_variables()
    if env_issues:
        print("ℹ️ Environment Info:")
        for issue in env_issues:
            print(f"   - {issue}")
    else:
        print("   ✅ Environment configured")

    # Final result
    print(f"\n{'🎉' if all_valid else '❌'} Validation {'PASSED' if all_valid else 'FAILED'}")

    if not all_valid:
        print("\n🛠️ Please fix the issues above before starting the application")
        sys.exit(1)
    else:
        print("\n✅ All critical validations passed - ready to start!")
        sys.exit(0)
175
+
176
if __name__ == "__main__":
    # Allow running the validator directly; exits nonzero on failure.
    main()
version.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
# Version identifier for debugging; bump this string alongside each deployed fix.
VERSION = "1.2.1-response-cleanup-fix"