package connector

import (
	"context"
	"encoding/json"
	"fmt"
	"sync"
	"time"

	"github.com/xitongsys/parquet-go-source/local"
	"github.com/xitongsys/parquet-go/parquet"
	"github.com/xitongsys/parquet-go/reader"
	"github.com/xitongsys/parquet-go/writer"
)

// ParquetSourceConnector represents a Parquet file source connector
// ParquetSourceConnector represents a Parquet file source connector.
// It streams rows from a local Parquet file and emits them as
// JSON-encoded records. All mutable state is guarded by mu, so the
// exported methods are safe for concurrent use.
type ParquetSourceConnector struct {
	// config is the raw connector configuration (recognized keys:
	// "path", "schema", "batch_size", "key_field").
	config map[string]interface{}
	// file is the underlying local file handle; closed in Stop when non-nil.
	file *local.LocalFile
	// reader is the Parquet reader created in Start.
	reader *reader.ParquetReader
	// metrics holds counters and timestamps reported by GetMetrics.
	metrics map[string]interface{}
	// mu guards all mutable state in this struct.
	mu sync.RWMutex
	// started indicates whether Start has completed successfully.
	started bool
	// filePath is the Parquet file path taken from config["path"].
	filePath string
	// schema is the optional Parquet schema from config["schema"].
	// NOTE(review): stored during Initialize but not visibly used here.
	schema string
	// totalRows is the total number of rows in the file.
	totalRows int64
	// currentRow is the index of the next row to read.
	currentRow int64
}

// NewParquetSourceConnector creates a new Parquet file source connector
// NewParquetSourceConnector constructs a Parquet file source connector
// with an empty metrics map; call Initialize before Start.
func NewParquetSourceConnector() SourceConnector {
	c := &ParquetSourceConnector{}
	c.metrics = map[string]interface{}{}
	return c
}

// Initialize initializes the connector
// Initialize stores the connector configuration and seeds the metrics.
// The "path" key is required; "schema" is optional.
func (c *ParquetSourceConnector) Initialize(config map[string]interface{}) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.config = config

	// "path" is mandatory — fail fast when it is absent or not a string.
	path, ok := config["path"].(string)
	if !ok {
		return fmt.Errorf("path is required")
	}
	c.filePath = path

	// "schema" is optional.
	if schema, ok := config["schema"].(string); ok {
		c.schema = schema
	}

	// Seed the metrics map.
	c.metrics["records_total"] = 0
	c.metrics["records_read"] = 0
	c.metrics["start_time"] = time.Now().Unix()

	return nil
}

// Start starts the connector
// Start opens the configured Parquet file and prepares the reader.
// It records the total row count in the metrics and marks the
// connector as started. Returns an error if already started or if
// the file/reader cannot be opened.
func (c *ParquetSourceConnector) Start(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Check if already started
	if c.started {
		return fmt.Errorf("connector already started")
	}

	// Open the file using parquet-go-source
	pf, err := local.NewLocalFileReader(c.filePath)
	if err != nil {
		return fmt.Errorf("failed to open file: %w", err)
	}
	// Keep the file handle so Stop can close it. Previously the handle
	// was dropped here, which made Stop's close branch dead code and
	// leaked the file descriptor.
	if lf, ok := pf.(*local.LocalFile); ok {
		c.file = lf
	}

	// Create a Parquet reader. A nil schema object lets the reader
	// derive the schema from the file footer; 4 is the read parallelism.
	pr, err := reader.NewParquetReader(pf, nil, 4)
	if err != nil {
		pf.Close()
		c.file = nil
		return fmt.Errorf("failed to create Parquet reader: %w", err)
	}
	c.reader = pr

	// Get total rows and reset the cursor
	c.totalRows = pr.GetNumRows()
	c.currentRow = 0

	// Update metrics
	c.metrics["records_total"] = c.totalRows

	c.started = true

	return nil
}

// Stop stops the connector
// Stop shuts down the reader and closes the underlying file, then
// marks the connector as stopped. Returns an error if the connector
// was never started or the file fails to close.
func (c *ParquetSourceConnector) Stop(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return fmt.Errorf("connector not started")
	}

	// Tear down the reader first so no reads race the file close.
	if r := c.reader; r != nil {
		r.ReadStop()
		c.reader = nil
	}

	// Close the file handle when one is held.
	if f := c.file; f != nil {
		if err := f.Close(); err != nil {
			return fmt.Errorf("failed to close file: %w", err)
		}
		c.file = nil
	}

	c.started = false

	return nil
}

// Read reads records from the source
// Read reads up to batch_size rows from the Parquet file and returns
// them as a RecordBatch of JSON-encoded records. It returns (nil, nil)
// once the file is exhausted, and an error if the connector is not
// started or a read/marshal step fails.
func (c *ParquetSourceConnector) Read(ctx context.Context) (*RecordBatch, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Check if started
	if !c.started {
		return nil, fmt.Errorf("connector not started")
	}

	// Check if reader is valid
	if c.reader == nil {
		return nil, fmt.Errorf("reader not initialized")
	}

	// End of file: signal completion with a nil batch.
	if c.currentRow >= c.totalRows {
		return nil, nil
	}

	// Resolve the batch size. Configs decoded from JSON carry numbers
	// as float64, so accept both int and float64 (previously only int
	// was recognized, silently ignoring JSON-sourced values). Guard
	// against non-positive sizes, which would break make() below.
	batchSize := 10
	switch size := c.config["batch_size"].(type) {
	case int:
		if size > 0 {
			batchSize = size
		}
	case float64:
		if size > 0 {
			batchSize = int(size)
		}
	}

	// Clamp the batch to the rows remaining in the file.
	if c.currentRow+int64(batchSize) > c.totalRows {
		batchSize = int(c.totalRows - c.currentRow)
	}

	// Read the next batchSize rows.
	// NOTE(review): reading into []map[string]interface{} assumes the
	// parquet-go reader supports map destinations for this schema —
	// confirm against how the reader is constructed in Start.
	rows := make([]map[string]interface{}, batchSize)
	if err := c.reader.Read(&rows); err != nil {
		return nil, fmt.Errorf("failed to read Parquet rows: %w", err)
	}

	// Convert rows to records
	records := make([]*Record, 0, len(rows))
	for _, row := range rows {
		// Each row is serialized as JSON for the record value.
		jsonBytes, err := json.Marshal(row)
		if err != nil {
			return nil, fmt.Errorf("failed to marshal record: %w", err)
		}

		// Create record
		record := NewRecord(nil, jsonBytes)
		record.Timestamp = time.Now()

		// Optionally derive the record key from a configured column.
		if keyField, ok := c.config["key_field"].(string); ok {
			if keyValue, ok := row[keyField]; ok {
				record.Key = []byte(fmt.Sprintf("%v", keyValue))
			}
		}

		records = append(records, record)
	}

	// Advance the cursor by the number of records produced.
	c.currentRow += int64(len(records))

	// Update metrics
	c.metrics["records_read"] = c.currentRow
	c.metrics["last_read_time"] = time.Now().Unix()

	return NewRecordBatch(records), nil
}

// Commit commits the offset
// Commit is a no-op: the Parquet source tracks its own row cursor and
// has no external offset store to acknowledge.
func (c *ParquetSourceConnector) Commit(ctx context.Context, offset interface{}) error {
	return nil
}

// GetMetrics gets connector metrics
// GetMetrics returns a snapshot copy of the connector metrics so
// callers cannot mutate the internal map.
func (c *ParquetSourceConnector) GetMetrics() map[string]interface{} {
	c.mu.RLock()
	defer c.mu.RUnlock()

	snapshot := make(map[string]interface{}, len(c.metrics))
	for key, value := range c.metrics {
		snapshot[key] = value
	}
	return snapshot
}

// ParquetSinkConnector represents a Parquet file sink connector
// ParquetSinkConnector represents a Parquet file sink connector.
// It writes JSON-decoded record values as rows into a local Parquet
// file. All mutable state is guarded by mu, so the exported methods
// are safe for concurrent use.
type ParquetSinkConnector struct {
	// config is the raw connector configuration (recognized keys:
	// "path", "schema", "compression", "append").
	config map[string]interface{}
	// file is the underlying local file handle; closed in Stop when non-nil.
	file *local.LocalFile
	// writer is the Parquet writer created in Start.
	writer *writer.ParquetWriter
	// metrics holds counters and timestamps reported by GetMetrics.
	metrics map[string]interface{}
	// mu guards all mutable state in this struct.
	mu sync.RWMutex
	// started indicates whether Start has completed successfully.
	started bool
	// filePath is the Parquet file path taken from config["path"].
	filePath string
	// schema is the optional Parquet schema from config["schema"].
	// NOTE(review): stored during Initialize but not visibly used here.
	schema string
	// compression is the codec name ("snappy", "gzip", "lz4", "zstd");
	// defaults to "snappy".
	compression string
	// append indicates whether the file should be appended to.
	// NOTE(review): stored during Initialize but not visibly used here.
	append bool
}

// NewParquetSinkConnector creates a new Parquet file sink connector
// NewParquetSinkConnector constructs a Parquet file sink connector
// with snappy compression and appending disabled by default; call
// Initialize before Start.
func NewParquetSinkConnector() SinkConnector {
	c := &ParquetSinkConnector{compression: "snappy"}
	c.metrics = map[string]interface{}{}
	c.append = false
	return c
}

// Initialize initializes the connector
// Initialize stores the connector configuration and seeds the metrics.
// The "path" key is required; "schema", "compression" and "append"
// are optional overrides of the defaults set by the constructor.
func (c *ParquetSinkConnector) Initialize(config map[string]interface{}) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Store the configuration
	c.config = config

	// Get file path — mandatory, fail fast when absent.
	if path, ok := config["path"].(string); ok {
		c.filePath = path
	} else {
		return fmt.Errorf("path is required")
	}

	// Get schema
	if schema, ok := config["schema"].(string); ok {
		c.schema = schema
	}

	// Get compression codec name
	if compression, ok := config["compression"].(string); ok {
		c.compression = compression
	}

	// Get append flag. Renamed from "append" to avoid shadowing the
	// builtin append within this scope.
	if appendFlag, ok := config["append"].(bool); ok {
		c.append = appendFlag
	}

	// Initialize metrics
	c.metrics["records_total"] = 0
	c.metrics["start_time"] = time.Now().Unix()

	return nil
}

// Start starts the connector
// Start opens the configured output file, creates the Parquet writer
// and applies the configured compression codec. Returns an error if
// already started or if the file/writer cannot be created.
func (c *ParquetSinkConnector) Start(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Check if already started
	if c.started {
		return fmt.Errorf("connector already started")
	}

	// Open the file using parquet-go-source
	pf, err := local.NewLocalFileWriter(c.filePath)
	if err != nil {
		return fmt.Errorf("failed to open file: %w", err)
	}
	// Keep the file handle so Stop can close it. Previously the handle
	// was dropped here, which made Stop's close branch dead code and
	// leaked the file descriptor.
	if lf, ok := pf.(*local.LocalFile); ok {
		c.file = lf
	}

	// Create a Parquet writer; 4 is the write parallelism.
	// NOTE(review): a nil schema object is passed here — confirm the
	// writer can operate without an explicit schema, or wire c.schema in.
	pw, err := writer.NewParquetWriter(pf, nil, 4)
	if err != nil {
		pf.Close()
		c.file = nil
		return fmt.Errorf("failed to create Parquet writer: %w", err)
	}

	// Map the configured codec name; unrecognized names fall back to
	// snappy, matching the constructor default.
	switch c.compression {
	case "gzip":
		pw.CompressionType = parquet.CompressionCodec_GZIP
	case "lz4":
		pw.CompressionType = parquet.CompressionCodec_LZ4
	case "zstd":
		pw.CompressionType = parquet.CompressionCodec_ZSTD
	default: // "snappy" and anything unrecognized
		pw.CompressionType = parquet.CompressionCodec_SNAPPY
	}

	c.writer = pw
	c.started = true

	return nil
}

// Stop stops the connector
// Stop finalizes the Parquet writer and closes the underlying file,
// then marks the connector as stopped. Returns an error if the
// connector was never started or finalization/close fails.
func (c *ParquetSinkConnector) Stop(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.started {
		return fmt.Errorf("connector not started")
	}

	// Finalize the writer first so the footer is written before the
	// file is closed.
	if w := c.writer; w != nil {
		if err := w.WriteStop(); err != nil {
			return fmt.Errorf("failed to close Parquet writer: %w", err)
		}
		c.writer = nil
	}

	// Close the file handle when one is held.
	if f := c.file; f != nil {
		if err := f.Close(); err != nil {
			return fmt.Errorf("failed to close file: %w", err)
		}
		c.file = nil
	}

	c.started = false

	return nil
}

// Write writes records to the sink
// Write decodes each record's JSON value and writes it as a Parquet
// row. A nil or empty batch is a successful no-op. Returns an error
// if the connector is not started or a decode/write step fails (the
// batch is then partially written).
func (c *ParquetSinkConnector) Write(ctx context.Context, batch *RecordBatch) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Check if started
	if !c.started {
		return fmt.Errorf("connector not started")
	}

	// Empty batches are a no-op.
	if batch == nil || len(batch.Records) == 0 {
		return nil
	}

	// Check if writer is valid
	if c.writer == nil {
		return fmt.Errorf("writer not initialized")
	}

	// Process each record
	for _, record := range batch.Records {
		// Record values are expected to be JSON objects.
		var data map[string]interface{}
		if err := json.Unmarshal(record.Value, &data); err != nil {
			return fmt.Errorf("failed to parse record value: %w", err)
		}

		// Write row
		if err := c.writer.Write(data); err != nil {
			return fmt.Errorf("failed to write Parquet row: %w", err)
		}
	}

	// Update metrics. Use the comma-ok assertion so a missing or
	// foreign-typed counter resets to the batch size instead of
	// panicking (the previous bare assertion would panic).
	total, _ := c.metrics["records_total"].(int)
	c.metrics["records_total"] = total + len(batch.Records)
	c.metrics["last_write_time"] = time.Now().Unix()

	return nil
}

// Flush flushes any buffered records
// Flush writes the current in-memory row group out to the underlying
// file. Returns an error if the connector is not started or the
// writer fails to flush.
func (c *ParquetSinkConnector) Flush(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Check if started
	if !c.started {
		return fmt.Errorf("connector not started")
	}

	// Check if writer is valid
	if c.writer == nil {
		return fmt.Errorf("writer not initialized")
	}

	// The previous implementation claimed the Parquet writer has no
	// flush; parquet-go's ParquetWriter exposes Flush(flushRowGroup),
	// which ends the current row group and writes it to the file.
	if err := c.writer.Flush(true); err != nil {
		return fmt.Errorf("failed to flush Parquet writer: %w", err)
	}

	return nil
}

// GetMetrics gets connector metrics
// GetMetrics returns a snapshot copy of the connector metrics so
// callers cannot mutate the internal map.
func (c *ParquetSinkConnector) GetMetrics() map[string]interface{} {
	c.mu.RLock()
	defer c.mu.RUnlock()

	snapshot := make(map[string]interface{}, len(c.metrics))
	for key, value := range c.metrics {
		snapshot[key] = value
	}
	return snapshot
}
