Arslan1997 committed
Commit 83e76e6 · 1 Parent(s): f85c79e

adding blog + viz fullscreen

app.py CHANGED
@@ -37,6 +37,7 @@ from src.agents.retrievers.retrievers import *
  from src.managers.ai_manager import AI_Manager
  from src.managers.session_manager import SessionManager
  from src.routes.analytics_routes import router as analytics_router
+ from src.routes.blog_routes import router as blog_router
  from src.routes.chat_routes import router as chat_router
  from src.routes.code_routes import router as code_router
  from src.routes.feedback_routes import router as feedback_router
@@ -1583,6 +1584,7 @@ app.include_router(session_router)
  app.include_router(feedback_router)
  app.include_router(deep_analysis_router)
  app.include_router(templates_router)
+ app.include_router(blog_router)

  if __name__ == "__main__":
      port = int(os.environ.get("PORT", 8000))
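With `blog_router` imported and registered, the blog endpoints are served by the same FastAPI app. A minimal smoke check, as a sketch: it assumes `app.py` exposes `app` at module level (as the `include_router` calls above imply) and that it is run from the project root with FastAPI's test client available.

```python
from fastapi.testclient import TestClient

from app import app  # assumed import path; app.py must be importable from the working directory

client = TestClient(app)

# The list endpoint should respond once blog_router is registered;
# the route itself returns 404 if sample-posts.json is missing on disk.
response = client.get("/api/blog/posts")
assert response.status_code in (200, 404)
print(response.status_code)
```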
src/routes/blog_routes.py ADDED
@@ -0,0 +1,101 @@
+ import os
+ import json
+ from fastapi import APIRouter, HTTPException
+ from typing import List
+ from pydantic import BaseModel
+
+ router = APIRouter()
+
+ class BlogPost(BaseModel):
+     id: str
+     title: str
+     excerpt: str
+     content: str
+     author: str
+     publishedAt: str
+     tags: List[str]
+     featured: bool
+     readTime: str
+
+ @router.get("/api/blog/posts", response_model=List[BlogPost])
+ async def get_blog_posts():
+     """Get all blog posts"""
+     try:
+         # Get the path to the utils/data directory
+         current_dir = os.path.dirname(os.path.abspath(__file__))
+         utils_dir = os.path.join(current_dir, '..', 'utils', 'data')
+         json_path = os.path.join(utils_dir, 'sample-posts.json')
+
+         # Normalize the path
+         json_path = os.path.normpath(json_path)
+
+         if not os.path.exists(json_path):
+             raise HTTPException(status_code=404, detail=f"Blog posts data file not found at: {json_path}")
+
+         with open(json_path, 'r', encoding='utf-8') as f:
+             posts = json.load(f)
+
+         return posts
+
+     except HTTPException:
+         # Let the 404 raised above propagate instead of being re-wrapped as a 500
+         raise
+     except FileNotFoundError:
+         raise HTTPException(status_code=404, detail=f"Blog posts data file not found at: {json_path}")
+     except json.JSONDecodeError:
+         raise HTTPException(status_code=500, detail="Invalid JSON format in blog posts data")
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Error loading blog posts: {str(e)}")
+
+ # The static /featured route is registered before the dynamic /{post_id} route,
+ # otherwise "featured" would be matched as a post_id and return 404.
+ @router.get("/api/blog/posts/featured", response_model=BlogPost)
+ async def get_featured_post():
+     """Get the featured blog post"""
+     try:
+         posts = await get_blog_posts()
+
+         for post in posts:
+             if post.get('featured', False):
+                 return post
+
+         raise HTTPException(status_code=404, detail="No featured post found")
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Error loading featured post: {str(e)}")
+
+ @router.get("/api/blog/posts/{post_id}", response_model=BlogPost)
+ async def get_blog_post(post_id: str):
+     """Get a specific blog post by ID"""
+     try:
+         posts = await get_blog_posts()
+
+         for post in posts:
+             if post['id'] == post_id:
+                 return post
+
+         raise HTTPException(status_code=404, detail="Blog post not found")
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Error loading blog post: {str(e)}")
+
+ @router.get("/api/blog/tags")
+ async def get_blog_tags():
+     """Get all unique tags from blog posts"""
+     try:
+         posts = await get_blog_posts()
+
+         all_tags = set()
+         for post in posts:
+             all_tags.update(post.get('tags', []))
+
+         return list(all_tags)
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Error loading blog tags: {str(e)}")
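The `response_model` annotations above require every entry in `sample-posts.json` (added below) to carry all nine `BlogPost` fields. A quick validation sketch, assuming it is run from the repository root with the project's pydantic installed:

```python
import json
from pathlib import Path

from src.routes.blog_routes import BlogPost  # module path taken from the app.py import above

# Parse each entry with the BlogPost model to confirm the data file matches the schema.
raw = json.loads(Path("src/utils/data/sample-posts.json").read_text(encoding="utf-8"))
posts = [BlogPost(**entry) for entry in raw]

featured_ids = [post.id for post in posts if post.featured]
print(f"validated {len(posts)} posts; featured: {featured_ids}")
```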
src/utils/data/actual-posts.json ADDED
File without changes
src/utils/data/sample-posts.json ADDED
@@ -0,0 +1,38 @@
+ [
+ {
+ "id": "1",
+ "title": "Getting Started with Auto-Analyst: A Complete Guide",
+ "excerpt": "Learn how to set up and use Auto-Analyst for your data analysis needs. This comprehensive guide covers everything from installation to advanced features.",
+ "content": "# Getting Started with Auto-Analyst: A Complete Guide\n\nWelcome to Auto-Analyst! This comprehensive guide will help you get started with our AI-powered analytics platform.\n\n## What is Auto-Analyst?\n\nAuto-Analyst is an AI-powered analytics platform that helps you analyze data, create visualizations, and generate insights automatically. Our platform combines the power of multiple AI models to provide comprehensive data analysis capabilities.\n\n## Key Features\n\n- **AI-Powered Analysis**: Multiple AI models including GPT-4, Claude, and more\n- **Interactive Visualizations**: Create beautiful charts and graphs\n- **Natural Language Queries**: Ask questions in plain English\n- **Multiple Data Sources**: Connect to various data sources\n- **Collaborative Features**: Share insights with your team\n\n## Getting Started\n\n### 1. Sign Up\nFirst, create your account at [Auto-Analyst](https://auto-analyst.com).\n\n### 2. Upload Your Data\nYou can upload data in various formats:\n- CSV files\n- Excel spreadsheets\n- JSON data\n- Direct database connections\n\n### 3. Start Analyzing\nOnce your data is uploaded, you can start asking questions:\n\n```python\n# Example: Analyze sales data\n\"What are the top-selling products this quarter?\"\n```\n\n## Advanced Features\n\n### Custom Agents\nCreate custom analysis agents for specific use cases:\n\n![Auto-Analyst Dashboard](https://via.placeholder.com/600x300/FF7F7F/FFFFFF?text=Auto-Analyst+Dashboard)\n\n### Deep Analysis\nUse our deep analysis feature for complex insights:\n\n<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/dQw4w9WgXcQ\" frameborder=\"0\" allowfullscreen></iframe>\n\n## Best Practices\n\n1. **Start Simple**: Begin with basic questions and gradually increase complexity\n2. **Use Clear Language**: Be specific in your queries\n3. **Validate Results**: Always verify AI-generated insights\n4. **Iterate**: Refine your analysis based on initial results\n\n## Support\n\nNeed help? Check out our:\n- [Documentation](https://docs.auto-analyst.com)\n- [Community Forum](https://community.auto-analyst.com)\n- [Contact Support](mailto:support@auto-analyst.com)\n\nHappy analyzing! 🚀",
+ "author": "Auto-Analyst Team",
+ "publishedAt": "2024-01-15",
+ "tags": ["tutorial", "getting-started", "guide"],
+ "featured": true,
+ "readTime": "8 min read"
+ },
+ {
+ "id": "2",
+ "title": "Advanced Data Visualization Techniques",
+ "excerpt": "Discover advanced techniques for creating compelling data visualizations using Auto-Analyst's powerful visualization tools.",
+ "content": "# Advanced Data Visualization Techniques\n\nData visualization is a crucial part of data analysis. In this post, we'll explore advanced techniques for creating compelling visualizations.\n\n## Choosing the Right Chart Type\n\nDifferent data types require different visualization approaches:\n\n### Time Series Data\n- Line charts for trends\n- Area charts for cumulative data\n- Candlestick charts for financial data\n\n### Categorical Data\n- Bar charts for comparisons\n- Pie charts for proportions\n- Treemaps for hierarchical data\n\n### Correlation Analysis\n- Scatter plots for relationships\n- Heatmaps for correlation matrices\n- Bubble charts for multi-dimensional data\n\n## Color Theory in Data Visualization\n\nColors play a crucial role in data visualization:\n\n### Color Palettes\n- **Sequential**: For ordered data (light to dark)\n- **Diverging**: For data with a meaningful center\n- **Qualitative**: For categorical data\n\n### Accessibility\nEnsure your visualizations are accessible:\n- Use colorblind-friendly palettes\n- Provide alternative text descriptions\n- Maintain sufficient contrast ratios\n\n## Interactive Features\n\nMake your visualizations interactive:\n\n```javascript\n// Example: Interactive chart configuration\n{\n \"type\": \"scatter\",\n \"data\": {\n \"x\": [1, 2, 3, 4, 5],\n \"y\": [2, 4, 6, 8, 10]\n },\n \"options\": {\n \"interactive\": true,\n \"zoom\": true,\n \"pan\": true\n }\n}\n```\n\n## Best Practices\n\n1. **Keep it Simple**: Don't overcomplicate your visualizations\n2. **Tell a Story**: Use visualizations to support your narrative\n3. **Test with Users**: Get feedback on your visualizations\n4. **Mobile Responsive**: Ensure visualizations work on all devices\n\n## Conclusion\n\nEffective data visualization requires both technical skill and design sensibility. By following these techniques, you can create visualizations that effectively communicate your data insights.",
+ "author": "Sarah Johnson",
+ "publishedAt": "2024-01-10",
+ "tags": ["visualization", "design", "best-practices"],
+ "featured": false,
+ "readTime": "6 min read"
+ },
+ {
+ "id": "3",
+ "title": "AI Model Comparison: Which One Should You Use?",
+ "excerpt": "Compare different AI models available in Auto-Analyst and learn when to use each one for optimal results.",
+ "content": "# AI Model Comparison: Which One Should You Use?\n\nAuto-Analyst supports multiple AI models, each with its own strengths and use cases. Let's compare them:\n\n## Available Models\n\n### GPT-4\n- **Best for**: General analysis, creative tasks\n- **Strengths**: Excellent reasoning, creative problem-solving\n- **Use cases**: Exploratory analysis, hypothesis generation\n\n### Claude (Anthropic)\n- **Best for**: Detailed analysis, code generation\n- **Strengths**: Strong reasoning, helpful responses\n- **Use cases**: Technical analysis, documentation\n\n### Gemini (Google)\n- **Best for**: Multimodal analysis, large datasets\n- **Strengths**: Fast processing, good with images\n- **Use cases**: Quick insights, image analysis\n\n### Groq\n- **Best for**: High-speed processing\n- **Strengths**: Very fast inference\n- **Use cases**: Real-time analysis, rapid prototyping\n\n## Performance Comparison\n\n| Model | Speed | Accuracy | Cost | Best Use Case |\n|-------|-------|----------|------|---------------|\n| GPT-4 | Medium | High | High | General analysis |\n| Claude | Medium | High | Medium | Technical tasks |\n| Gemini | Fast | Medium | Low | Quick insights |\n| Groq | Very Fast | Medium | Low | Real-time analysis |\n\n## Choosing the Right Model\n\n### For Beginners\nStart with **Gemini** or **Groq** for fast, cost-effective analysis.\n\n### For Complex Analysis\nUse **GPT-4** or **Claude** for detailed, nuanced insights.\n\n### For Real-time Applications\nChoose **Groq** for high-speed processing.\n\n## Tips for Model Selection\n\n1. **Start Simple**: Begin with faster, cheaper models\n2. **Iterate**: Try different models for the same task\n3. **Compare Results**: Evaluate outputs from multiple models\n4. **Consider Cost**: Balance performance with budget\n\n## Conclusion\n\nThe best model depends on your specific needs. Experiment with different models to find what works best for your use case.",
+ "author": "Mike Chen",
+ "publishedAt": "2024-01-05",
+ "tags": ["ai", "models", "comparison"],
+ "featured": false,
+ "readTime": "5 min read"
+ }
+ ]
+
+
+
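For reference, the `/api/blog/tags` endpoint defined earlier deduplicates the `tags` lists across these three sample posts. A small sketch of that computation against this data file (run from the repository root); the endpoint itself returns the set in unspecified order:

```python
import json
from pathlib import Path

posts = json.loads(Path("src/utils/data/sample-posts.json").read_text(encoding="utf-8"))

# Union of the per-post tag lists, as get_blog_tags() computes.
tags = sorted({tag for post in posts for tag in post.get("tags", [])})
print(tags)
# ['ai', 'best-practices', 'comparison', 'design', 'getting-started',
#  'guide', 'models', 'tutorial', 'visualization']
```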
test_datasets.ipynb CHANGED
@@ -0,0 +1,58 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "ae0e83db",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import io\n",
+     "\n",
+     "import dspy\n",
+     "import duckdb\n",
+     "import pandas as pd\n",
+     "\n",
+     "# `contents` (raw Excel file bytes) and `sheet_name` are assumed to be defined in an earlier cell\n",
+     "excel_df = pd.read_excel(io.BytesIO(contents), sheet_name=sheet_name)\n",
+     "\n",
+     "# Preprocessing steps\n",
+     "# 1. Drop empty rows and columns\n",
+     "excel_df.dropna(how='all', inplace=True)  # Remove empty rows\n",
+     "excel_df.dropna(how='all', axis=1, inplace=True)  # Remove empty columns\n",
+     "\n",
+     "# 2. Clean column names\n",
+     "excel_df.columns = excel_df.columns.str.strip()  # Remove extra spaces\n",
+     "\n",
+     "# 3. Convert Excel data to CSV with UTF-8-sig encoding\n",
+     "csv_buffer = io.StringIO()\n",
+     "excel_df.to_csv(csv_buffer, index=False, encoding='utf-8-sig')\n",
+     "csv_buffer.seek(0)\n",
+     "\n",
+     "# Read the processed CSV back into a dataframe\n",
+     "new_df = pd.read_csv(csv_buffer)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "bcef79e3",
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "base",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "name": "python",
+    "version": "3.11.7"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
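The first notebook cell above walks through an Excel-to-CSV preprocessing recipe: drop empty rows and columns, strip whitespace from column names, then round-trip through an in-memory CSV. A self-contained sketch of the same steps as a reusable helper; `contents` and `sheet_name` are assumed to come from the caller (for example, an upload handler), and the function name is illustrative:

```python
import io

import pandas as pd


def excel_sheet_to_clean_df(contents: bytes, sheet_name: str) -> pd.DataFrame:
    """Clean one Excel sheet as in the notebook cell and return the round-tripped DataFrame."""
    excel_df = pd.read_excel(io.BytesIO(contents), sheet_name=sheet_name)

    # 1. Drop fully empty rows and columns
    excel_df.dropna(how="all", inplace=True)
    excel_df.dropna(how="all", axis=1, inplace=True)

    # 2. Strip stray whitespace from column names
    excel_df.columns = excel_df.columns.str.strip()

    # 3. Round-trip through an in-memory CSV buffer
    #    (the notebook's utf-8-sig encoding only matters when writing to an actual file)
    csv_buffer = io.StringIO()
    excel_df.to_csv(csv_buffer, index=False)
    csv_buffer.seek(0)
    return pd.read_csv(csv_buffer)


# Hypothetical usage: contents would be the uploaded workbook's raw bytes.
# with open("report.xlsx", "rb") as f:
#     df = excel_sheet_to_clean_df(f.read(), sheet_name="Sheet1")
```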