use crate::error::Result;
use crate::models::{CrawledData, DataOverview, PlatformStats, Task, TaskLog};
use sqlx::{
    sqlite::{SqlitePool, SqlitePoolOptions},
    Row,
};
use std::path::PathBuf;

/// Async handle to the application's SQLite database.
///
/// Wraps a `sqlx` connection pool; `Clone` is cheap (the pool itself is
/// internally reference-counted), so this can be shared across Tauri
/// command handlers and background tasks.
#[derive(Clone)]
pub struct Database {
    pool: SqlitePool,
}

impl Database {
    /// Opens (creating if necessary) the on-disk database and ensures the
    /// schema exists.
    ///
    /// # Errors
    /// Fails if the data directory cannot be resolved/created, the database
    /// file cannot be opened, or the DDL statements fail.
    pub async fn new() -> Result<Self> {
        // Resolve where the SQLite file lives (inside the OS app-data dir).
        let db_path = Self::get_db_path()?;

        // The parent directory must exist before SQLite can open the file.
        if let Some(dir) = db_path.parent() {
            tokio::fs::create_dir_all(dir).await?;
        }

        // `mode=rwc`: read/write, create the file when it is missing.
        let url = format!("sqlite:{}?mode=rwc", db_path.display());
        let pool = SqlitePool::connect(&url).await?;

        // Tables are created by hand instead of via sqlx migrations.
        Self::create_tables(&pool).await?;

        Ok(Self { pool })
    }
    
    pub async fn new_memory() -> Result<Self> {
        let pool = SqlitePool::connect("sqlite::memory:").await?;
        
        // 手动创建表
        Self::create_tables(&pool).await?;
        
        Ok(Self { pool })
    }
    
    fn get_db_path() -> Result<PathBuf> {
        // 使用应用数据目录来存储数据库文件
        let app_data_dir = dirs::data_dir()
            .ok_or_else(|| crate::error::AppError::Unknown("无法获取应用数据目录".to_string()))?
            .join("mediacrawler-gui");
        
        // 如果应用数据目录不存在，则创建它
        std::fs::create_dir_all(&app_data_dir)?;
        
        let db_path = app_data_dir.join("sqlite_tables.db");
        
        println!("🗄️ [Tauri] 数据库路径: {:?}", db_path);
        
        Ok(db_path)
    }
    
    /// Creates every table and index the application needs.
    ///
    /// All statements use `IF NOT EXISTS`, so running this on every startup
    /// is idempotent. Statements execute in declaration order (tables before
    /// the indexes that reference them, referenced tables before tables with
    /// foreign keys).
    async fn create_tables(pool: &SqlitePool) -> Result<()> {
        let ddl: [&str; 8] = [
            // Crawled-content table.
            r#"
            CREATE TABLE IF NOT EXISTS crawled_data (
                id TEXT PRIMARY KEY,
                platform TEXT NOT NULL,
                data_type TEXT NOT NULL,
                title TEXT NOT NULL,
                content TEXT,
                author TEXT NOT NULL,
                author_id TEXT NOT NULL,
                url TEXT NOT NULL,
                like_count INTEGER DEFAULT 0,
                comment_count INTEGER DEFAULT 0,
                share_count INTEGER DEFAULT 0,
                view_count INTEGER DEFAULT 0,
                created_at TEXT NOT NULL,
                crawled_at TEXT NOT NULL,
                keywords TEXT NOT NULL
            )
            "#,
            // Lookup indexes for the common filter/sort columns.
            "CREATE INDEX IF NOT EXISTS idx_platform ON crawled_data(platform)",
            "CREATE INDEX IF NOT EXISTS idx_crawled_at ON crawled_data(crawled_at)",
            "CREATE INDEX IF NOT EXISTS idx_keywords ON crawled_data(keywords)",
            // Crawler task table.
            r#"
            CREATE TABLE IF NOT EXISTS crawler_tasks (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                platform TEXT NOT NULL,
                task_type TEXT NOT NULL,
                keywords TEXT,
                status TEXT NOT NULL DEFAULT 'pending',
                progress REAL DEFAULT 0.0,
                total_count INTEGER DEFAULT 0,
                success_count INTEGER DEFAULT 0,
                failed_count INTEGER DEFAULT 0,
                created_at TEXT NOT NULL,
                started_at TEXT,
                completed_at TEXT,
                error_message TEXT,
                config TEXT
            )
            "#,
            // Per-task log table (FK to crawler_tasks).
            r#"
            CREATE TABLE IF NOT EXISTS task_logs (
                id TEXT PRIMARY KEY,
                task_id TEXT NOT NULL,
                level TEXT NOT NULL,
                message TEXT NOT NULL,
                created_at TEXT NOT NULL,
                FOREIGN KEY (task_id) REFERENCES crawler_tasks (id)
            )
            "#,
            // Application-wide log table.
            r#"
            CREATE TABLE IF NOT EXISTS logs (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                level TEXT NOT NULL,
                message TEXT NOT NULL,
                created_at TEXT NOT NULL
            )
            "#,
            // Key/value settings table.
            r#"
            CREATE TABLE IF NOT EXISTS app_settings (
                key TEXT PRIMARY KEY,
                value TEXT NOT NULL,
                updated_at TEXT NOT NULL
            )
            "#,
        ];

        for stmt in ddl {
            sqlx::query(stmt).execute(pool).await?;
        }

        Ok(())
    }
    
    /// Inserts one crawled record. Fails if a row with the same `id`
    /// already exists (plain INSERT, no upsert).
    ///
    /// # Errors
    /// Returns the underlying sqlx error (e.g. primary-key conflict).
    pub async fn insert_crawled_data(&self, data: &CrawledData) -> Result<()> {
        // Column order must stay in sync with the bind order below.
        const INSERT_SQL: &str = r#"
            INSERT INTO crawled_data (
                id, platform, data_type, title, content, author, author_id, 
                url, like_count, comment_count, share_count, view_count, 
                created_at, crawled_at, keywords
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            "#;

        let query = sqlx::query(INSERT_SQL)
            .bind(&data.id)
            .bind(&data.platform)
            .bind(&data.data_type)
            .bind(&data.title)
            .bind(&data.content)
            .bind(&data.author)
            .bind(&data.author_id)
            .bind(&data.url)
            .bind(data.like_count)
            .bind(data.comment_count)
            .bind(data.share_count)
            .bind(data.view_count)
            .bind(&data.created_at)
            .bind(&data.crawled_at)
            .bind(&data.keywords);

        query.execute(&self.pool).await?;
        Ok(())
    }
    
    /// Aggregates crawled data per (platform, data_type): total rows, rows
    /// crawled today, and the latest crawl timestamp, decorated with a
    /// display name and emoji icon for known platforms.
    pub async fn get_data_overview(&self) -> Result<Vec<DataOverview>> {
        let rows = sqlx::query(
            r#"
            SELECT 
                platform,
                data_type,
                COUNT(*) as total_count,
                COUNT(CASE WHEN DATE(crawled_at) = DATE('now') THEN 1 END) as today_count,
                MAX(crawled_at) as last_update
            FROM crawled_data 
            GROUP BY platform, data_type
            ORDER BY platform, data_type
            "#
        )
        .fetch_all(&self.pool)
        .await?;

        let overview = rows
            .iter()
            .map(|row| {
                let platform: String = row.get("platform");
                let total_count: i64 = row.get("total_count");
                let today_count: i64 = row.get("today_count");
                let last_update: Option<String> = row.get("last_update");

                // Map the raw platform key to its icon + display name;
                // unknown platforms fall back to a generic icon and the key.
                let (icon, display_name) = match platform.as_str() {
                    "xhs" => ("🔴", "小红书"),
                    "douyin" => ("🎵", "抖音"),
                    "kuaishou" => ("⚡", "快手"),
                    "bilibili" => ("📺", "B站"),
                    "weibo" => ("📱", "微博"),
                    "zhihu" => ("🗣️", "知乎"),
                    "tieba" => ("💬", "贴吧"),
                    _ => ("📄", platform.as_str()),
                };

                DataOverview {
                    platform: display_name.to_string(),
                    platform_icon: icon.to_string(),
                    data_type: row.get("data_type"),
                    total_count: total_count as i32,
                    today_count: today_count as i32,
                    last_update: last_update.unwrap_or_else(|| "未知".to_string()),
                }
            })
            .collect();

        Ok(overview)
    }
    
    /// Returns a page of crawled records, newest first, optionally filtered
    /// by platform key.
    ///
    /// `limit`/`offset` implement the paging; a `None` platform means "all
    /// platforms".
    pub async fn get_data_list(
        &self,
        platform: Option<&str>,
        limit: i32,
        offset: i32,
    ) -> Result<Vec<CrawledData>> {
        // Two distinct prepared statements: filtered vs. unfiltered.
        let rows = match platform {
            Some(p) => {
                sqlx::query(
                    r#"
                SELECT * FROM crawled_data 
                WHERE platform = ? 
                ORDER BY crawled_at DESC 
                LIMIT ? OFFSET ?
                "#
                )
                .bind(p)
                .bind(limit)
                .bind(offset)
                .fetch_all(&self.pool)
                .await?
            }
            None => {
                sqlx::query(
                    r#"
                SELECT * FROM crawled_data 
                ORDER BY crawled_at DESC 
                LIMIT ? OFFSET ?
                "#
                )
                .bind(limit)
                .bind(offset)
                .fetch_all(&self.pool)
                .await?
            }
        };

        let data: Vec<CrawledData> = rows
            .into_iter()
            .map(|row| CrawledData {
                id: row.get("id"),
                platform: row.get("platform"),
                data_type: row.get("data_type"),
                title: row.get("title"),
                content: row.get("content"),
                author: row.get("author"),
                author_id: row.get("author_id"),
                url: row.get("url"),
                like_count: row.get("like_count"),
                comment_count: row.get("comment_count"),
                share_count: row.get("share_count"),
                view_count: row.get("view_count"),
                created_at: row.get("created_at"),
                crawled_at: row.get("crawled_at"),
                keywords: row.get("keywords"),
            })
            .collect();

        Ok(data)
    }
    
    /// Builds per-platform statistics from the crawled-data table.
    ///
    /// NOTE(review): `avg_engagement` is computed by the query but not read
    /// back, and `total_tasks`/`avg_speed`/`success_rate` are placeholder
    /// values — task-table integration is still TODO (as the original
    /// comments noted).
    pub async fn get_platform_stats(&self) -> Result<Vec<PlatformStats>> {
        let rows = sqlx::query(
            r#"
            SELECT 
                platform,
                COUNT(*) as total_data,
                AVG(like_count + comment_count + share_count) as avg_engagement,
                MAX(crawled_at) as last_update
            FROM crawled_data 
            GROUP BY platform
            ORDER BY total_data DESC
            "#
        )
        .fetch_all(&self.pool)
        .await?;

        let stats = rows
            .iter()
            .map(|row| {
                let platform: String = row.get("platform");
                let total_data: i64 = row.get("total_data");
                let last_update: Option<String> = row.get("last_update");

                // Known platform keys map to an icon + localized name.
                let (icon, display_name) = match platform.as_str() {
                    "xhs" => ("🔴", "小红书"),
                    "douyin" => ("🎵", "抖音"),
                    "kuaishou" => ("⚡", "快手"),
                    "bilibili" => ("📺", "B站"),
                    "weibo" => ("📱", "微博"),
                    "zhihu" => ("🗣️", "知乎"),
                    "tieba" => ("💬", "贴吧"),
                    _ => ("📄", platform.as_str()),
                };

                PlatformStats {
                    platform: display_name.to_string(),
                    icon: icon.to_string(),
                    total_tasks: 0, // TODO: source from crawler_tasks table
                    success_tasks: 0,
                    failed_tasks: 0,
                    total_data: total_data as i32,
                    avg_speed: 15.0,    // mock value
                    success_rate: 95.0, // mock value
                    last_update: last_update.unwrap_or_else(|| "未知".to_string()),
                }
            })
            .collect();

        Ok(stats)
    }
    
    /// Deletes the rows whose ids are listed and returns how many rows were
    /// actually removed (ids that don't exist simply don't count).
    ///
    /// Uses batched `DELETE ... WHERE id IN (...)` statements instead of one
    /// round trip per id; ids are chunked to stay well under SQLite's bound-
    /// parameter limit (999 in older builds).
    ///
    /// # Errors
    /// Returns the first sqlx error encountered; earlier chunks may already
    /// have been deleted at that point.
    pub async fn delete_data(&self, ids: Vec<String>) -> Result<i32> {
        if ids.is_empty() {
            // Avoid generating the invalid SQL `IN ()`.
            return Ok(0);
        }

        let mut deleted_count: i32 = 0;

        for chunk in ids.chunks(500) {
            let placeholders = vec!["?"; chunk.len()].join(", ");
            let sql = format!("DELETE FROM crawled_data WHERE id IN ({})", placeholders);

            let mut query = sqlx::query(&sql);
            for id in chunk {
                query = query.bind(id);
            }

            let result = query.execute(&self.pool).await?;
            deleted_count += result.rows_affected() as i32;
        }

        Ok(deleted_count)
    }
    
    /// Counts crawled records, optionally restricted to one platform key.
    pub async fn get_data_count(&self, platform: Option<&str>) -> Result<i32> {
        let row = match platform {
            Some(p) => {
                sqlx::query("SELECT COUNT(*) as count FROM crawled_data WHERE platform = ?")
                    .bind(p)
                    .fetch_one(&self.pool)
                    .await?
            }
            None => {
                sqlx::query("SELECT COUNT(*) as count FROM crawled_data")
                    .fetch_one(&self.pool)
                    .await?
            }
        };

        let total: i64 = row.get("count");
        Ok(total as i32)
    }
    
    // 任务相关方法
    pub async fn insert_task(&self, task: &Task) -> Result<()> {
        sqlx::query(
            r#"
            INSERT INTO crawler_tasks (
                id, name, platform, task_type, keywords, status, progress,
                total_count, success_count, failed_count, created_at, 
                started_at, completed_at, error_message, config
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            "#
        )
        .bind(&task.id)
        .bind(&task.name)
        .bind(&task.platform)
        .bind(&task.task_type)
        .bind(&task.keywords)
        .bind(&task.status)
        .bind(task.progress)
        .bind(task.total_count)
        .bind(task.success_count)
        .bind(task.failed_count)
        .bind(&task.created_at)
        .bind(&task.started_at)
        .bind(&task.completed_at)
        .bind(&task.error_message)
        .bind(&task.config)
        .execute(&self.pool)
        .await?;
        
        Ok(())
    }
    
    /// Updates a task's status, stamping `started_at` on the first
    /// transition to 'running' and `completed_at` when the status becomes
    /// 'completed' or 'failed' (handled inside the SQL CASE expressions).
    pub async fn update_task_status(&self, task_id: &str, status: &str) -> Result<()> {
        // `status` is bound three times because SQLite positional `?`
        // placeholders cannot be referenced more than once.
        let sql = r#"
            UPDATE crawler_tasks 
            SET status = ?,
                started_at = CASE 
                    WHEN ? = 'running' AND started_at IS NULL THEN datetime('now')
                    ELSE started_at
                END,
                completed_at = CASE 
                    WHEN ? IN ('completed', 'failed') THEN datetime('now')
                    ELSE completed_at
                END
            WHERE id = ?
            "#;

        sqlx::query(sql)
            .bind(status)
            .bind(status)
            .bind(status)
            .bind(task_id)
            .execute(&self.pool)
            .await?;

        Ok(())
    }
    
    /// Writes the latest progress fraction and success/failure counters for
    /// one task. A nonexistent `task_id` is a silent no-op (zero rows).
    pub async fn update_task_progress(&self, task_id: &str, progress: f64, success_count: i32, failed_count: i32) -> Result<()> {
        let sql = r#"
            UPDATE crawler_tasks 
            SET progress = ?,
                success_count = ?,
                failed_count = ?
            WHERE id = ?
            "#;

        sqlx::query(sql)
            .bind(progress)
            .bind(success_count)
            .bind(failed_count)
            .bind(task_id)
            .execute(&self.pool)
            .await?;

        Ok(())
    }
    
    /// Records an error message on a task row (status is updated separately
    /// via `update_task_status`).
    pub async fn update_task_error(&self, task_id: &str, error_message: &str) -> Result<()> {
        let sql = r#"
            UPDATE crawler_tasks 
            SET error_message = ?
            WHERE id = ?
            "#;

        sqlx::query(sql)
            .bind(error_message)
            .bind(task_id)
            .execute(&self.pool)
            .await?;

        Ok(())
    }
    
    /// Looks up a single task by id; `None` when no row matches.
    pub async fn get_task_by_id(&self, task_id: &str) -> Result<Option<Task>> {
        let task = sqlx::query_as::<_, Task>(
            r#"
            SELECT * FROM crawler_tasks WHERE id = ?
            "#
        )
        .bind(task_id)
        .fetch_optional(&self.pool)
        .await?;

        Ok(task)
    }
    
    /// Fetches every task, newest first (unbounded — pagination is left to
    /// the caller).
    pub async fn get_all_tasks(&self) -> Result<Vec<Task>> {
        sqlx::query_as::<_, Task>(
            r#"
            SELECT * FROM crawler_tasks ORDER BY created_at DESC
            "#
        )
        .fetch_all(&self.pool)
        .await
        .map_err(Into::into)
    }
    
    /// Appends one log entry for a task. `task_id` should reference an
    /// existing row in `crawler_tasks` (FK declared in the schema).
    pub async fn insert_task_log(&self, log: &TaskLog) -> Result<()> {
        const INSERT_SQL: &str = r#"
            INSERT INTO task_logs (
                id, task_id, level, message, created_at
            ) VALUES (?, ?, ?, ?, ?)
            "#;

        sqlx::query(INSERT_SQL)
            .bind(&log.id)
            .bind(&log.task_id)
            .bind(&log.level)
            .bind(&log.message)
            .bind(&log.created_at)
            .execute(&self.pool)
            .await?;

        Ok(())
    }
    
    /// Returns up to `limit` most recent log entries for one task,
    /// newest first.
    pub async fn get_task_logs(&self, task_id: &str, limit: i32) -> Result<Vec<TaskLog>> {
        sqlx::query_as::<_, TaskLog>(
            r#"
            SELECT * FROM task_logs 
            WHERE task_id = ? 
            ORDER BY created_at DESC 
            LIMIT ?
            "#
        )
        .bind(task_id)
        .bind(limit)
        .fetch_all(&self.pool)
        .await
        .map_err(Into::into)
    }
}