//go:build database
// +build database

package connector

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"strings"
	"sync"
	"time"

	_ "github.com/go-sql-driver/mysql"
	_ "github.com/lib/pq"
	_ "github.com/mattn/go-sqlite3"
)

// DatabaseSourceConnector reads rows from a SQL database and emits them as
// JSON-encoded records, paginating with LIMIT/OFFSET between Read calls.
// It implements SourceConnector for the mysql, postgres, and sqlite3 drivers.
type DatabaseSourceConnector struct {
	// config is the raw connector configuration passed to Initialize.
	config map[string]interface{}
	// db is the open database handle; set by Start, closed by Stop.
	db *sql.DB
	// metrics holds counters such as "records_read" and timestamps; guarded by mu.
	metrics map[string]interface{}
	// mu guards all mutable state on this struct.
	mu sync.RWMutex
	// started reports whether Start has succeeded and Stop has not yet run.
	started bool
	// driver is the database/sql driver name (e.g. "mysql", "postgres", "sqlite3").
	driver string
	// dsn is the driver-specific data source name.
	dsn string
	// query is the SQL query to page through.
	query string
	// params are positional query parameters bound on every execution.
	params []interface{}
	// batchSize is the maximum number of rows fetched per Read (default 10).
	batchSize int
	// offset is the current read position, advanced by Read and set by Commit.
	offset int
	// keyColumn names the column whose value becomes each record's key;
	// empty means records have no key.
	keyColumn string
}

// NewDatabaseSourceConnector returns a source connector with an empty metrics
// map, a default batch size of 10, and a starting offset of zero.
func NewDatabaseSourceConnector() SourceConnector {
	c := &DatabaseSourceConnector{
		metrics:   map[string]interface{}{},
		batchSize: 10,
	}
	return c
}

// Initialize validates and stores the connector configuration.
// Required keys: "driver", "dsn", "query". Optional keys: "params"
// ([]interface{} of positional query parameters), "batch_size" (number of
// rows per Read; accepted as float64 or int), and "key_column" (string).
// It also seeds the metrics map. Returns an error for missing required keys
// or a non-positive batch size.
func (c *DatabaseSourceConnector) Initialize(config map[string]interface{}) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Store the raw configuration for later inspection.
	c.config = config

	driver, ok := config["driver"].(string)
	if !ok {
		return fmt.Errorf("driver is required")
	}
	c.driver = driver

	dsn, ok := config["dsn"].(string)
	if !ok {
		return fmt.Errorf("dsn is required")
	}
	c.dsn = dsn

	query, ok := config["query"].(string)
	if !ok {
		return fmt.Errorf("query is required")
	}
	c.query = query

	if params, ok := config["params"].([]interface{}); ok {
		c.params = params
	}

	// batch_size arrives as float64 when the config was decoded from JSON,
	// but as int when built programmatically — accept both.
	switch bs := config["batch_size"].(type) {
	case float64:
		c.batchSize = int(bs)
	case int:
		c.batchSize = bs
	}
	// A non-positive batch size would make Read issue "LIMIT 0" forever.
	if c.batchSize <= 0 {
		return fmt.Errorf("batch_size must be positive")
	}

	if keyColumn, ok := config["key_column"].(string); ok {
		c.keyColumn = keyColumn
	}

	// Seed metrics so Read can increment counters without nil checks.
	c.metrics["records_total"] = 0
	c.metrics["records_read"] = 0
	c.metrics["start_time"] = time.Now().Unix()

	return nil
}

// Start opens the database connection and verifies it with a ping.
// The connection is only retained (and the connector marked started) after
// the ping succeeds; on failure the handle is closed so no pool is leaked.
// Returns an error if the connector is already started or the database is
// unreachable.
func (c *DatabaseSourceConnector) Start(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.started {
		return fmt.Errorf("connector already started")
	}

	// sql.Open only validates arguments; the ping below performs the real
	// connection attempt.
	db, err := sql.Open(c.driver, c.dsn)
	if err != nil {
		return fmt.Errorf("failed to open database connection: %w", err)
	}

	// Use PingContext so Start honors caller cancellation, and close the
	// handle on failure to avoid leaking the connection pool.
	if err := db.PingContext(ctx); err != nil {
		db.Close()
		return fmt.Errorf("failed to ping database: %w", err)
	}

	c.db = db
	c.started = true

	return nil
}

// Stop closes the database connection and marks the connector as stopped.
// Returns an error if the connector was never started or the close fails.
func (c *DatabaseSourceConnector) Stop(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return fmt.Errorf("connector not started")
	}

	if err := c.db.Close(); err != nil {
		return fmt.Errorf("failed to close database connection: %w", err)
	}

	c.started = false

	return nil
}

// Read executes the configured query and returns the next batch of rows as
// JSON-encoded records, or (nil, nil) when the result set is exhausted.
// Pagination appends "LIMIT batchSize OFFSET offset" to the configured query
// unless the query already contains a LIMIT clause, and the offset advances
// by the number of records returned.
func (c *DatabaseSourceConnector) Read(ctx context.Context) (*RecordBatch, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return nil, fmt.Errorf("connector not started")
	}

	// Append pagination unless the query already carries a LIMIT.
	// NOTE(review): this substring check can false-positive on identifiers
	// that contain "limit"; configured queries should avoid such names.
	query := c.query
	if !strings.Contains(strings.ToUpper(query), "LIMIT") {
		query = fmt.Sprintf("%s LIMIT %d OFFSET %d", query, c.batchSize, c.offset)
	}

	rows, err := c.db.QueryContext(ctx, query, c.params...)
	if err != nil {
		return nil, fmt.Errorf("failed to execute query: %w", err)
	}
	defer rows.Close()

	columns, err := rows.Columns()
	if err != nil {
		return nil, fmt.Errorf("failed to get column names: %w", err)
	}

	records := make([]*Record, 0, c.batchSize)
	for rows.Next() {
		// Scan every column into an interface{} so arbitrary queries work
		// without knowing the schema up front.
		values := make([]interface{}, len(columns))
		valuePtrs := make([]interface{}, len(columns))
		for i := range values {
			valuePtrs[i] = &values[i]
		}
		if err := rows.Scan(valuePtrs...); err != nil {
			return nil, fmt.Errorf("failed to scan row: %w", err)
		}

		record := make(map[string]interface{}, len(columns))
		var key []byte
		for i, col := range columns {
			val := values[i]
			if val == nil {
				record[col] = nil
			} else {
				switch v := val.(type) {
				case []byte:
					// Text columns often arrive as []byte; convert so JSON
					// encodes them as strings rather than base64.
					record[col] = string(v)
				default:
					record[col] = v
				}
			}

			if c.keyColumn != "" && col == c.keyColumn {
				switch v := val.(type) {
				case []byte:
					// Copy the bytes: database/sql drivers may reuse the
					// backing buffer on the next call to rows.Next(), so
					// aliasing it would corrupt earlier records' keys.
					key = append([]byte(nil), v...)
				default:
					key = []byte(fmt.Sprintf("%v", v))
				}
			}
		}

		data, err := json.Marshal(record)
		if err != nil {
			return nil, fmt.Errorf("failed to marshal record: %w", err)
		}

		rec := NewRecord(key, data)
		rec.Timestamp = time.Now()
		records = append(records, rec)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate rows: %w", err)
	}

	// An empty batch signals end-of-data; the offset is left unchanged so a
	// later Commit can still reposition the cursor.
	if len(records) == 0 {
		return nil, nil
	}

	c.offset += len(records)

	c.metrics["records_read"] = c.metrics["records_read"].(int) + len(records)
	c.metrics["last_read_time"] = time.Now().Unix()

	return NewRecordBatch(records), nil
}

// Commit repositions the read cursor to the given offset. A nil offset is a
// no-op; any non-int offset is rejected with an error.
func (c *DatabaseSourceConnector) Commit(ctx context.Context, offset interface{}) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Nothing to commit.
	if offset == nil {
		return nil
	}

	pos, ok := offset.(int)
	if !ok {
		return fmt.Errorf("invalid offset type")
	}
	c.offset = pos

	return nil
}

// GetMetrics returns a snapshot of the connector metrics. The returned map
// is a copy, so callers may read it without holding the connector's lock.
func (c *DatabaseSourceConnector) GetMetrics() map[string]interface{} {
	c.mu.RLock()
	defer c.mu.RUnlock()

	snapshot := make(map[string]interface{}, len(c.metrics))
	for key, value := range c.metrics {
		snapshot[key] = value
	}
	return snapshot
}

// DatabaseSinkConnector buffers JSON-encoded records and writes them to a
// SQL table in batched, transactional INSERTs. It implements SinkConnector
// for the mysql, postgres, and sqlite3 drivers.
type DatabaseSinkConnector struct {
	// config is the raw connector configuration passed to Initialize.
	config map[string]interface{}
	// db is the open database handle; set by Start, closed by Stop.
	db *sql.DB
	// metrics holds counters such as "records_total" and timestamps; guarded by mu.
	metrics map[string]interface{}
	// mu guards all mutable state on this struct.
	mu sync.RWMutex
	// started reports whether Start has succeeded and Stop has not yet run.
	started bool
	// driver is the database/sql driver name (e.g. "mysql", "postgres", "sqlite3").
	driver string
	// dsn is the driver-specific data source name.
	dsn string
	// table is the destination table for INSERTs.
	table string
	// columns are the destination columns; discovered from the table in
	// Start when not supplied via configuration.
	columns []string
	// batchSize is the number of buffered records that triggers a flush
	// (default 10).
	batchSize int
	// buffer accumulates records between flushes; guarded by mu.
	buffer []*Record
}

// NewDatabaseSinkConnector returns a sink connector with an empty metrics
// map, an empty record buffer, and a default batch size of 10.
func NewDatabaseSinkConnector() SinkConnector {
	c := &DatabaseSinkConnector{
		metrics:   map[string]interface{}{},
		batchSize: 10,
		buffer:    []*Record{},
	}
	return c
}

// Initialize validates and stores the connector configuration.
// Required keys: "driver", "dsn", "table". Optional keys: "columns"
// (accepted as []string or []interface{} of strings) and "batch_size"
// (accepted as float64 or int). It also seeds the metrics map. Returns an
// error for missing required keys, non-string column entries, or a
// non-positive batch size.
func (c *DatabaseSinkConnector) Initialize(config map[string]interface{}) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Store the raw configuration for later inspection.
	c.config = config

	driver, ok := config["driver"].(string)
	if !ok {
		return fmt.Errorf("driver is required")
	}
	c.driver = driver

	dsn, ok := config["dsn"].(string)
	if !ok {
		return fmt.Errorf("dsn is required")
	}
	c.dsn = dsn

	table, ok := config["table"].(string)
	if !ok {
		return fmt.Errorf("table is required")
	}
	c.table = table

	// Columns arrive as []interface{} when the config was decoded from
	// JSON, but as []string when built programmatically — accept both.
	switch cols := config["columns"].(type) {
	case []string:
		c.columns = cols
	case []interface{}:
		c.columns = make([]string, len(cols))
		for i, col := range cols {
			colStr, ok := col.(string)
			if !ok {
				return fmt.Errorf("column must be a string")
			}
			c.columns[i] = colStr
		}
	}

	// batch_size arrives as float64 from JSON or int programmatically.
	switch bs := config["batch_size"].(type) {
	case float64:
		c.batchSize = int(bs)
	case int:
		c.batchSize = bs
	}
	// A non-positive batch size would make Write flush on every call or
	// never buffer correctly.
	if c.batchSize <= 0 {
		return fmt.Errorf("batch_size must be positive")
	}

	// Seed metrics so Write can increment counters without nil checks.
	c.metrics["records_total"] = 0
	c.metrics["start_time"] = time.Now().Unix()

	return nil
}

// Start opens the database connection, verifies it with a ping, and — when
// no columns were configured — discovers the column set from the target
// table. The connection is only retained (and the connector marked started)
// after every step succeeds; on any failure the handle is closed so no pool
// is leaked.
func (c *DatabaseSinkConnector) Start(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.started {
		return fmt.Errorf("connector already started")
	}

	// sql.Open only validates arguments; the ping below performs the real
	// connection attempt.
	db, err := sql.Open(c.driver, c.dsn)
	if err != nil {
		return fmt.Errorf("failed to open database connection: %w", err)
	}

	// Use PingContext so Start honors caller cancellation, and close the
	// handle on failure to avoid leaking the connection pool.
	if err := db.PingContext(ctx); err != nil {
		db.Close()
		return fmt.Errorf("failed to ping database: %w", err)
	}

	if len(c.columns) == 0 {
		// "LIMIT 0" returns column metadata without fetching any rows.
		query := fmt.Sprintf("SELECT * FROM %s LIMIT 0", c.table)
		rows, err := db.QueryContext(ctx, query)
		if err != nil {
			db.Close()
			return fmt.Errorf("failed to get columns: %w", err)
		}
		columns, err := rows.Columns()
		rows.Close()
		if err != nil {
			db.Close()
			return fmt.Errorf("failed to get column names: %w", err)
		}
		c.columns = columns
	}

	c.db = db
	c.started = true

	return nil
}

// Stop flushes any buffered records and then closes the database connection.
// If the flush fails, the connector stays started so a caller may retry.
func (c *DatabaseSinkConnector) Stop(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return fmt.Errorf("connector not started")
	}

	// Drain the buffer before tearing down the connection.
	if err := c.flushBuffer(ctx); err != nil {
		return fmt.Errorf("failed to flush buffer: %w", err)
	}

	if err := c.db.Close(); err != nil {
		return fmt.Errorf("failed to close database connection: %w", err)
	}

	c.started = false

	return nil
}

// Write appends the batch's records to the internal buffer and flushes the
// buffer to the database once it holds at least batchSize records. A nil or
// empty batch is a no-op.
func (c *DatabaseSinkConnector) Write(ctx context.Context, batch *RecordBatch) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return fmt.Errorf("connector not started")
	}

	if batch == nil || len(batch.Records) == 0 {
		return nil
	}

	c.buffer = append(c.buffer, batch.Records...)

	// Count records when they are accepted into the buffer, not after the
	// flush: a failed flush returns early, and the records it left buffered
	// would otherwise never be counted even once a later flush succeeds.
	c.metrics["records_total"] = c.metrics["records_total"].(int) + len(batch.Records)
	c.metrics["last_write_time"] = time.Now().Unix()

	if len(c.buffer) >= c.batchSize {
		if err := c.flushBuffer(ctx); err != nil {
			return fmt.Errorf("failed to flush buffer: %w", err)
		}
	}

	return nil
}

// Flush forces any buffered records out to the database immediately.
// Returns an error if the connector is not started or the write fails.
func (c *DatabaseSinkConnector) Flush(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return fmt.Errorf("connector not started")
	}

	if err := c.flushBuffer(ctx); err != nil {
		return fmt.Errorf("failed to flush buffer: %w", err)
	}

	return nil
}

// GetMetrics returns a snapshot of the connector metrics. The returned map
// is a copy, so callers may read it without holding the connector's lock.
func (c *DatabaseSinkConnector) GetMetrics() map[string]interface{} {
	c.mu.RLock()
	defer c.mu.RUnlock()

	snapshot := make(map[string]interface{}, len(c.metrics))
	for key, value := range c.metrics {
		snapshot[key] = value
	}
	return snapshot
}

// flushBuffer inserts every buffered record into the target table inside a
// single transaction and clears the buffer on success. On any failure the
// transaction is rolled back and the buffer is left intact for retry.
// The caller must hold c.mu.
func (c *DatabaseSinkConnector) flushBuffer(ctx context.Context) error {
	if len(c.buffer) == 0 {
		return nil
	}

	// Build driver-appropriate placeholders once: postgres uses positional
	// $1..$n markers, the other supported drivers use "?".
	placeholders := make([]string, len(c.columns))
	for i := range placeholders {
		if c.driver == "postgres" {
			placeholders[i] = fmt.Sprintf("$%d", i+1)
		} else {
			placeholders[i] = "?"
		}
	}

	// NOTE(review): the table and column identifiers are interpolated
	// directly into the SQL and must come from trusted configuration, not
	// end-user input; record values are bound as parameters.
	query := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)",
		c.table,
		strings.Join(c.columns, ", "),
		strings.Join(placeholders, ", "),
	)

	tx, err := c.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	// Rollback is a no-op (ErrTxDone) once Commit succeeds, so deferring it
	// safely covers every error path below.
	defer tx.Rollback()

	stmt, err := tx.PrepareContext(ctx, query)
	if err != nil {
		return fmt.Errorf("failed to prepare statement: %w", err)
	}
	defer stmt.Close()

	for _, record := range c.buffer {
		var data map[string]interface{}
		if err := json.Unmarshal(record.Value, &data); err != nil {
			return fmt.Errorf("failed to unmarshal record: %w", err)
		}

		// Order values to match the column list; missing keys insert NULL.
		values := make([]interface{}, len(c.columns))
		for i, col := range c.columns {
			values[i] = data[col]
		}

		if _, err := stmt.ExecContext(ctx, values...); err != nil {
			return fmt.Errorf("failed to execute statement: %w", err)
		}
	}

	if err := tx.Commit(); err != nil {
		return fmt.Errorf("failed to commit transaction: %w", err)
	}

	// Everything landed; reset the buffer but keep its capacity for reuse.
	c.buffer = c.buffer[:0]

	return nil
}
