#!/usr/bin/env bash
#
# Seed the knowledge base with a set of starter troubleshooting documents
# by POSTing each one to the knowledge API.
#
# Usage: seed_knowledge.sh [API_URL]
#   API_URL  Base URL of the API (default: http://localhost:8080/api)

set -euo pipefail

readonly API_URL="${1:-http://localhost:8080/api}"

#######################################
# POST a single knowledge document to the API.
# Globals:   API_URL (read)
# Arguments: $1 - JSON payload describing the document
# Outputs:   API response on stdout; a warning on stderr on failure
# Returns:   0 if the request succeeded, 1 otherwise
#######################################
seed_document() {
  local payload=$1
  local rv=0
  # --fail turns HTTP 4xx/5xx into a non-zero curl exit instead of silently
  # printing the error body; -sS hides the progress meter but still reports
  # transport errors on stderr.
  if ! curl -sS --fail -X POST "$API_URL/knowledge" \
      -H "Content-Type: application/json" \
      -d "$payload"; then
    printf 'warning: failed to seed a knowledge document\n' >&2
    rv=1
  fi
  printf '\n'  # blank separator line after each response, as before
  return "$rv"
}

main() {
  local failures=0
  local doc

  printf '%s\n' "🌱 Seeding Knowledge Base..."

  # Payloads are quoted heredocs ('JSON') so no shell expansion occurs and
  # the document bodies are sent exactly as written.
  doc=$(cat <<'JSON'
{
    "title": "Database Service Down - Troubleshooting Guide",
    "content": "# Problem Description\n\nDatabase service demo-db-service:3306 is down and not responding.\n\n# Root Cause Analysis\n\nPossible causes:\n1. Database process crashed or exited abnormally\n2. Server resources exhausted (CPU, memory, disk space)\n3. Network connectivity issues\n4. Database configuration errors or corruption\n5. Operating system level problems\n6. Hardware failure\n\n# Solution\n\n1. **Emergency Response**: Login to demo-db-service host, check if server is reachable\n2. **Service Status Check**: Check database process status (ps aux | grep mysql/postgresql)\n3. **Log Analysis**: Check database log files for errors and crash causes\n4. **Resource Check**: Monitor CPU, memory, disk usage for resource bottlenecks\n5. **Service Restart**: Attempt to restart database service if process has stopped\n6. **Connection Test**: Verify port 3306 is listening and database connection works\n7. **Application Verification**: Confirm applications depending on this database resume normal operation\n\n# Preventive Measures\n\n1. **Monitoring**: Fix timestamp issues, setup multi-layer monitoring\n2. **Resource Management**: Establish resource usage threshold alerts, regularly clean logs\n3. **High Availability**: Deploy database master-slave replication or clustering\n4. **Regular Maintenance**: Establish database health check schedule, regular backup testing\n5. **Capacity Planning**: Analyze historical data, plan resource expansion in advance\n6. **Automation**: Deploy service auto-restart scripts and health check mechanisms\n7. **Documentation**: Establish standardized troubleshooting procedures and emergency response plans",
    "category": "Incident Resolution",
    "tags": ["database", "downtime", "troubleshooting", "production"],
    "status": "published"
  }
JSON
  )
  seed_document "$doc" || failures=$((failures + 1))

  doc=$(cat <<'JSON'
{
    "title": "High CPU Usage - Performance Optimization",
    "content": "# Problem Description\n\nApplication experiencing high CPU usage (>80%) causing performance degradation.\n\n# Root Cause Analysis\n\n- Inefficient algorithms or code loops\n- Memory leaks causing excessive garbage collection\n- Unoptimized database queries\n- High request volume without proper load balancing\n- Resource-intensive background jobs\n\n# Solution\n\n1. **Identify Process**: Use top/htop to identify the process consuming CPU\n2. **Profile Application**: Use profiling tools (pprof, perf, etc.) to find hotspots\n3. **Optimize Code**: Refactor inefficient algorithms and loops\n4. **Database Optimization**: Add indexes, optimize queries\n5. **Scale Horizontally**: Add more instances and load balance traffic\n6. **Caching**: Implement caching for frequently accessed data\n7. **Background Jobs**: Move heavy processing to background queue workers\n\n# Preventive Measures\n\n- Regular performance profiling and benchmarking\n- Implement auto-scaling based on CPU metrics\n- Code review process focusing on performance\n- Load testing before production deployment\n- Monitoring and alerting for CPU thresholds",
    "category": "Performance",
    "tags": ["cpu", "performance", "optimization", "monitoring"],
    "status": "published"
  }
JSON
  )
  seed_document "$doc" || failures=$((failures + 1))

  doc=$(cat <<'JSON'
{
    "title": "Kubernetes Pod CrashLoopBackOff Resolution",
    "content": "# Problem Description\n\nKubernetes pod stuck in CrashLoopBackOff state, unable to start successfully.\n\n# Root Cause Analysis\n\n- Application crash during startup\n- Missing configuration or environment variables\n- Resource limits too restrictive\n- Failed health/readiness probe checks\n- Image pull errors\n- Dependency services not available\n\n# Solution\n\n1. **Check Logs**: `kubectl logs <pod-name> --previous`\n2. **Describe Pod**: `kubectl describe pod <pod-name>` to see events\n3. **Check Image**: Verify image exists and is pullable\n4. **Environment**: Validate ConfigMaps and Secrets are mounted correctly\n5. **Resources**: Check if resource requests/limits are appropriate\n6. **Dependencies**: Ensure database, cache, etc. are accessible\n7. **Probes**: Review health/readiness probe configuration\n8. **Debug**: Use `kubectl exec` or ephemeral containers to debug\n\n# Preventive Measures\n\n- Implement proper startup scripts with error handling\n- Use init containers for dependency checks\n- Set appropriate resource limits based on profiling\n- Implement graceful degradation for missing dependencies\n- Comprehensive logging for troubleshooting\n- Pre-deployment validation in staging environment",
    "category": "Container Orchestration",
    "tags": ["kubernetes", "k8s", "crashloop", "containers", "troubleshooting"],
    "status": "published"
  }
JSON
  )
  seed_document "$doc" || failures=$((failures + 1))

  doc=$(cat <<'JSON'
{
    "title": "SSL Certificate Expiration Prevention",
    "content": "# Problem Description\n\nSSL/TLS certificates expiring causing service disruption and security warnings.\n\n# Root Cause Analysis\n\n- Manual certificate renewal process forgotten\n- No expiration monitoring in place\n- Certificate renewal automation failed\n- DNS validation issues for automated renewal\n\n# Solution\n\n1. **Immediate**: Renew expired certificate manually\n2. **Update**: Deploy new certificate to all affected services\n3. **Verify**: Test HTTPS endpoints\n4. **Clear Cache**: Clear CDN and browser caches if needed\n\n# Preventive Measures\n\n1. **Automation**: Implement cert-manager or similar automated renewal\n2. **Monitoring**: Set up expiration alerts (30, 14, 7, 1 days before)\n3. **Documentation**: Document renewal process and responsible parties\n4. **Testing**: Test renewal process in staging environment\n5. **Backup**: Maintain backup certificates\n6. **Calendar**: Add renewal reminders to team calendar",
    "category": "Security",
    "tags": ["ssl", "tls", "certificate", "security", "automation"],
    "status": "published"
  }
JSON
  )
  seed_document "$doc" || failures=$((failures + 1))

  doc=$(cat <<'JSON'
{
    "title": "Redis Memory Full - Cache Management",
    "content": "# Problem Description\n\nRedis instance running out of memory, causing cache eviction or service errors.\n\n# Root Cause Analysis\n\n- No eviction policy configured\n- Large number of keys without TTL\n- Memory leak in application code\n- Insufficient memory allocation\n- Unexpected traffic spike\n\n# Solution\n\n1. **Immediate**: Identify and remove large/unnecessary keys\n2. **Eviction Policy**: Configure appropriate eviction policy (allkeys-lru, volatile-lru)\n3. **TTL**: Set expiration time for cache entries\n4. **Analyze**: Use MEMORY STATS and MEMORY DOCTOR commands\n5. **Scale**: Increase memory or add Redis cluster nodes\n6. **Optimize**: Review application caching logic\n\n# Preventive Measures\n\n- Configure maxmemory and eviction policies\n- Implement proper TTL for all cached data\n- Monitor memory usage with alerts\n- Regular key analysis and cleanup\n- Capacity planning based on usage patterns\n- Consider Redis cluster for horizontal scaling",
    "category": "Caching",
    "tags": ["redis", "cache", "memory", "performance"],
    "status": "published"
  }
JSON
  )
  seed_document "$doc" || failures=$((failures + 1))

  if (( failures > 0 )); then
    # NOTE: seeding stays best-effort — failures are reported but the exit
    # status remains 0, matching the script's original behavior so repeated
    # runs (e.g. when some documents already exist) don't break pipelines.
    printf '⚠️  %d document(s) failed to seed — see warnings above.\n' "$failures" >&2
  else
    printf '%s\n' "✅ Knowledge base seeded successfully!"
  fi
}

main "$@"
