//! GraphQL resolvers
//! 
//! This module contains GraphQL resolver implementations for database operations.

use crate::{
    services::DatabaseIntrospector,
    schema::types::{OrderDirection, PaginationInput, UserStub, JsonConnection},
};
use async_graphql::{Context, Object, FieldResult};
use sea_orm::{DatabaseConnection, Statement, DatabaseBackend as DbBackend, ConnectionTrait};
use std::sync::Arc;
use tracing::{debug, info};

/// Root GraphQL query resolver.
///
/// Unit struct: it carries no state of its own — every field pulls the shared
/// `Arc<DatabaseConnection>` out of the request `Context`.
pub struct QueryResolver;

#[Object]
impl QueryResolver {
    /// Get database metadata
    async fn database_metadata(&self, ctx: &Context<'_>) -> FieldResult<serde_json::Value> {
        let db = ctx.data::<Arc<DatabaseConnection>>()?;
        let introspector = DatabaseIntrospector::new(Arc::clone(db));
        info!("Fetching database metadata");
        let metadata = introspector.get_database_metadata().await?;
        Ok(serde_json::to_value(metadata).unwrap_or(serde_json::json!({ "error": "serialize_failed" })))
    }
    
    /// Get table names
    async fn table_names(&self, ctx: &Context<'_>) -> FieldResult<Vec<String>> {
        let db = ctx.data::<Arc<DatabaseConnection>>()?;
        let introspector = DatabaseIntrospector::new(Arc::clone(db));
        
        debug!("Fetching table names");
        let tables = introspector.get_tables().await?;
        let names = tables.into_iter().map(|t| t.name).collect();
        
        Ok(names)
    }

    /// Demo: query users list, returning only name column
    /// Deprecated alias note: was `users`, renamed to avoid conflict with direct table resolver.
    async fn users_demo(&self, ctx: &Context<'_>) -> FieldResult<Vec<UserStub>> {
        let db = ctx.data::<Arc<DatabaseConnection>>()?;
        debug!("Querying users.name for demo");

        let backend = db.as_ref().get_database_backend();
        let stmt = match backend {
            DbBackend::Postgres => Statement::from_sql_and_values(
                DbBackend::Postgres,
                "SELECT name FROM users LIMIT 50",
                vec![],
            ),
            DbBackend::MySql => Statement::from_sql_and_values(
                DbBackend::MySql,
                "SELECT name FROM users LIMIT 50",
                vec![],
            ),
            DbBackend::Sqlite => Statement::from_sql_and_values(
                DbBackend::Sqlite,
                "SELECT name FROM users LIMIT 50",
                vec![],
            ),
        };

        let rows = db.as_ref().query_all(stmt).await?;
        let mut out = Vec::new();
        for row in rows {
            let name: String = row.try_get("", "name")?;
            out.push(UserStub { name });
        }
        Ok(out)
    }
    
    /// Health check
    async fn health(&self) -> FieldResult<String> {
        Ok("OK".to_string())
    }

    /// Direct table aliases (no need for table(name: "..."))
    ///
    /// Example:
    /// query {
    ///   users {
    ///     byId(id: "1")
    ///     all(pagination: { limit: 5, offset: 0 })
    ///   }
    /// }
    ///
    /// Tip: To support more tables directly, add more functions like this,
    /// or refactor to generate them from configuration/metadata.
    async fn users(&self) -> FieldResult<TableResolver> {
        Ok(TableResolver::new("users".to_string()))
    }

    /// Dynamic table entry: return a resolver bound to the given table
    async fn table(&self, _ctx: &Context<'_>, name: String) -> FieldResult<TableResolver> {
        Ok(TableResolver::new(name))
    }

    /// List rows of any table with pagination
    async fn rows(
        &self,
        ctx: &Context<'_>,
        table: String,
        pagination: Option<PaginationInput>,
    ) -> FieldResult<JsonConnection> {
        let resolver = TableResolver::new(table);
        let json = resolver.all(ctx, pagination).await?;
        let nodes = json.get("data").and_then(|v| v.as_array()).cloned().unwrap_or_default();
        let limit = json
            .get("pagination").and_then(|p| p.get("limit")).and_then(|v| v.as_i64())
            .unwrap_or(50) as i32;
        let offset = json
            .get("pagination").and_then(|p| p.get("offset")).and_then(|v| v.as_i64())
            .unwrap_or(0) as i32;
        let total_count = json.get("totalCount").and_then(|v| v.as_i64())
            .unwrap_or(nodes.len() as i64) as i32;
        Ok(JsonConnection::new(nodes, total_count, limit, offset))
    }
}

/// Root GraphQL mutation resolver.
///
/// Unit struct; like `QueryResolver`, it reads the shared database
/// connection from the request `Context`.
pub struct MutationResolver;

#[Object]
impl MutationResolver {
    /// Refresh database metadata (useful for development).
    ///
    /// Re-runs introspection, discards the result, and reports success.
    async fn refresh_metadata(&self, ctx: &Context<'_>) -> FieldResult<bool> {
        let conn = ctx.data::<Arc<DatabaseConnection>>()?;
        info!("Refreshing database metadata");
        DatabaseIntrospector::new(Arc::clone(conn))
            .get_database_metadata()
            .await?;
        Ok(true)
    }
}

/// Dynamic table resolver.
///
/// Bound to a single table name at construction time; its GraphQL fields
/// query that table using live schema introspection.
pub struct TableResolver {
    // Raw, user-supplied table name; matched fuzzily (case-insensitive,
    // singular/plural) against real tables at query time.
    table_name: String,
}

impl TableResolver {
    /// Create a new table resolver
    pub fn new(table_name: String) -> Self {
        Self { table_name }
    }
}

#[Object]
impl TableResolver {
    /// Get all records from the table
    async fn all(
        &self,
        ctx: &Context<'_>,
        pagination: Option<PaginationInput>,
    ) -> FieldResult<serde_json::Value> {
        let db = ctx.data::<Arc<DatabaseConnection>>()?;
        let pagination = pagination.unwrap_or_default();
        let limit = pagination.limit.unwrap_or(50).max(1);
        let offset = pagination.offset.unwrap_or(0).max(0);

        debug!("Fetching all records from table: {}", self.table_name);

        // Real-time introspection: determine schema and columns
        let introspector = DatabaseIntrospector::new(Arc::clone(db));
        let tables = introspector.get_tables().await?;
        // Prefer public schema if multiple matches, with fuzzy name matching (singular/plural, case-insensitive)
        let name_lc = self.table_name.to_lowercase();
        let mut candidates = vec![name_lc.clone()];
        if !name_lc.ends_with('s') {
            candidates.push(format!("{}s", name_lc));
        } else if name_lc.ends_with('s') && name_lc.len() > 1 {
            candidates.push(name_lc.trim_end_matches('s').to_string());
        }
        let (schema, table) = tables
            .iter()
            .filter(|t| {
                let tname = t.name.to_lowercase();
                candidates.iter().any(|c| c == &tname)
            })
            .min_by_key(|t| if t.schema == "public" { 0 } else { 1 })
            .map(|t| (t.schema.clone(), t.name.clone()))
            .ok_or_else(|| async_graphql::Error::new(format!("Table '{}' not found (tried: {:?})", self.table_name, candidates)))?;

        let columns = introspector.get_columns_in_schema(&schema, &table).await?;
        let col_names: Vec<String> = columns.iter().map(|c| c.name.clone()).collect();
        let select_cols = if col_names.is_empty() { "*".to_string() } else { 
            col_names.iter().map(|c| format!("\"{}\"", c)).collect::<Vec<_>>().join(", ")
        };

        let backend = db.as_ref().get_database_backend();
        let qualified = match backend {
            DbBackend::Postgres | DbBackend::Sqlite => format!("\"{}\".\"{}\"", schema, table),
            DbBackend::MySql => format!("`{}`.`{}`", schema, table),
        };
        let stmt = Statement::from_string(backend, format!("SELECT {} FROM {} LIMIT {} OFFSET {}", select_cols, qualified, limit, offset));

        let rows = db.as_ref().query_all(stmt).await?;
        let mut data = Vec::new();
        for row in rows {
            let mut obj = serde_json::Map::new();
            for col in &col_names {
                let val = row.try_get("", col).map(|v: String| serde_json::Value::String(v)).ok()
                    .or_else(|| row.try_get("", col).map(|v: i64| serde_json::Value::Number(v.into())).ok())
                    .or_else(|| row.try_get("", col).map(|v: i32| serde_json::Value::Number(v.into())).ok())
                    .or_else(|| row.try_get("", col).map(|v: f64| serde_json::Value::Number(serde_json::Number::from_f64(v).unwrap_or(serde_json::Number::from(0)))).ok())
                    .or_else(|| row.try_get("", col).map(|v: bool| serde_json::Value::Bool(v)).ok())
                    .or_else(|| row.try_get("", col).map(|v: serde_json::Value| v).ok())
                    .unwrap_or(serde_json::Value::Null);
                obj.insert(col.clone(), val);
            }
            data.push(serde_json::Value::Object(obj));
        }

        // Total count
        let count_stmt = match backend {
            DbBackend::Postgres => Statement::from_string(DbBackend::Postgres, format!("SELECT COUNT(*) as cnt FROM \"{}\"", self.table_name)),
            DbBackend::MySql => Statement::from_string(DbBackend::MySql, format!("SELECT COUNT(*) as cnt FROM `{}`", self.table_name)),
            DbBackend::Sqlite => Statement::from_string(DbBackend::Sqlite, format!("SELECT COUNT(*) as cnt FROM \"{}\"", self.table_name)),
        };
        let total_count = db.as_ref().query_one(count_stmt).await?
            .and_then(|r| r.try_get("", "cnt").ok())
            .unwrap_or(0_i64) as i32;

        let result = serde_json::json!({
            "table": self.table_name,
            "data": data,
            "totalCount": total_count,
            "pagination": {
                "limit": limit,
                "offset": offset
            }
        });
        
        Ok(result)
    }
    
    /// Get a single record by ID (primary key)
    async fn by_id(
        &self,
        ctx: &Context<'_>,
        id: String,
    ) -> FieldResult<Option<serde_json::Value>> {
        let db = ctx.data::<Arc<DatabaseConnection>>()?;
        debug!("Fetching record by ID: {} from table: {}", id, self.table_name);

        // Real-time introspection: determine schema and PK
        let introspector = DatabaseIntrospector::new(Arc::clone(db));
        let tables = introspector.get_tables().await?;
        // Fuzzy name matching for primary key lookup
        let name_lc = self.table_name.to_lowercase();
        let mut candidates = vec![name_lc.clone()];
        if !name_lc.ends_with('s') {
            candidates.push(format!("{}s", name_lc));
        } else if name_lc.ends_with('s') && name_lc.len() > 1 {
            candidates.push(name_lc.trim_end_matches('s').to_string());
        }
        let (schema, table) = tables
            .iter()
            .filter(|t| {
                let tname = t.name.to_lowercase();
                candidates.iter().any(|c| c == &tname)
            })
            .min_by_key(|t| if t.schema == "public" { 0 } else { 1 })
            .map(|t| (t.schema.clone(), t.name.clone()))
            .ok_or_else(|| async_graphql::Error::new(format!("Table '{}' not found (tried: {:?})", self.table_name, candidates)))?;

        let columns = introspector.get_columns_in_schema(&schema, &table).await?;
        let pk_col = columns.iter().find(|c| c.is_primary_key)
            .map(|c| c.name.clone())
            .ok_or_else(|| async_graphql::Error::new(format!("Primary key not found for table '{}.{}'", schema, table)))?;

        let backend = db.as_ref().get_database_backend();
        let qualified = match backend {
            DbBackend::Postgres | DbBackend::Sqlite => format!("\"{}\".\"{}\"", schema, table),
            DbBackend::MySql => format!("`{}`.`{}`", schema, table),
        };

        let stmt = match backend {
            DbBackend::Postgres => Statement::from_sql_and_values(DbBackend::Postgres, 
                format!("SELECT * FROM {} WHERE \"{}\" = $1 LIMIT 1", qualified, pk_col), vec![id.clone().into()]),
            DbBackend::MySql => Statement::from_sql_and_values(DbBackend::MySql, 
                format!("SELECT * FROM {} WHERE `{}` = ? LIMIT 1", qualified, pk_col), vec![id.clone().into()]),
            DbBackend::Sqlite => Statement::from_sql_and_values(DbBackend::Sqlite, 
                format!("SELECT * FROM {} WHERE \"{}\" = ? LIMIT 1", qualified, pk_col), vec![id.clone().into()]),
        };

        if let Some(row) = db.as_ref().query_one(stmt).await? {
            let mut obj = serde_json::Map::new();
            for col in columns.iter().map(|c| c.name.clone()) {
                let val = row.try_get("", &col).map(|v: String| serde_json::Value::String(v)).ok()
                    .or_else(|| row.try_get("", &col).map(|v: i64| serde_json::Value::Number(v.into())).ok())
                    .or_else(|| row.try_get("", &col).map(|v: i32| serde_json::Value::Number(v.into())).ok())
                    .or_else(|| row.try_get("", &col).map(|v: f64| serde_json::Value::Number(serde_json::Number::from_f64(v).unwrap_or(serde_json::Number::from(0)))).ok())
                    .or_else(|| row.try_get("", &col).map(|v: bool| serde_json::Value::Bool(v)).ok())
                    .or_else(|| row.try_get("", &col).map(|v: serde_json::Value| v).ok())
                    .unwrap_or(serde_json::Value::Null);
                obj.insert(col, val);
            }
            Ok(Some(serde_json::Value::Object(obj)))
        } else {
            Ok(None)
        }
    }
}

/// Dynamic mutation resolver for tables.
///
/// Bound to a single table name; its fields are currently stubs that echo
/// their input rather than executing real INSERT/UPDATE/DELETE statements.
pub struct TableMutationResolver {
    // Table name the mutations would target.
    table_name: String,
}

impl TableMutationResolver {
    /// Create a new table mutation resolver
    pub fn new(table_name: String) -> Self {
        Self { table_name }
    }
}

#[Object]
impl TableMutationResolver {
    /// Create a new record.
    ///
    /// Stub: echoes the input back instead of running an INSERT. A real
    /// implementation would validate `input` against the table schema, build
    /// a dynamic INSERT with SeaORM, execute it, and return the created row.
    async fn create(
        &self,
        ctx: &Context<'_>,
        input: serde_json::Value,
    ) -> FieldResult<serde_json::Value> {
        let _db = ctx.data::<Arc<DatabaseConnection>>()?;

        info!("Creating record in table: {}", self.table_name);
        debug!("Input data: {:?}", input);

        Ok(serde_json::json!({
            "table": self.table_name,
            "created": true,
            "data": input
        }))
    }

    /// Update a record by ID.
    ///
    /// Stub: echoes the input back instead of running an UPDATE. A real
    /// implementation would validate `input`, build a dynamic UPDATE with
    /// SeaORM, execute it, and return the updated row.
    async fn update(
        &self,
        ctx: &Context<'_>,
        id: String,
        input: serde_json::Value,
    ) -> FieldResult<serde_json::Value> {
        let _db = ctx.data::<Arc<DatabaseConnection>>()?;

        info!("Updating record {} in table: {}", id, self.table_name);
        debug!("Input data: {:?}", input);

        Ok(serde_json::json!({
            "table": self.table_name,
            "id": id,
            "updated": true,
            "data": input
        }))
    }

    /// Delete a record by ID.
    ///
    /// Stub: always reports success. A real implementation would build and
    /// execute a dynamic DELETE with SeaORM and report the actual outcome.
    async fn delete(
        &self,
        ctx: &Context<'_>,
        id: String,
    ) -> FieldResult<bool> {
        let _db = ctx.data::<Arc<DatabaseConnection>>()?;

        info!("Deleting record {} from table: {}", id, self.table_name);

        Ok(true)
    }

    /// Dynamic mutation entry: return a mutation resolver bound to the table.
    ///
    /// NOTE(review): this lives on TableMutationResolver itself, making the
    /// type recursively nestable in the schema — it looks like it was meant
    /// for MutationResolver instead; confirm before relying on it.
    async fn table_mutation(
        &self,
        _ctx: &Context<'_>,
        name: String,
    ) -> FieldResult<TableMutationResolver> {
        Ok(TableMutationResolver::new(name))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // Constructors must store the supplied table name verbatim.
    #[test]
    fn test_table_resolver_creation() {
        let r = TableResolver::new(String::from("users"));
        assert_eq!(r.table_name, "users");
    }

    #[test]
    fn test_table_mutation_resolver_creation() {
        let r = TableMutationResolver::new(String::from("users"));
        assert_eq!(r.table_name, "users");
    }
}