package collector

import (
	"database/sql"
	"fmt"
	"log/slog"
	"strings"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

const (
	// backupSubsystem is the Prometheus subsystem segment for all backup metrics.
	backupSubsystem = "backup"
	// Queries
	//  backupSetQuery        = `SELECT DEVICE_TYPE, BACKUP_ID, BACKUP_NAME, BACKUP_PATH, TYPE, LEVEL, RANGE#, OBJECT_NAME, BASE_NAME, BACKUP_TIME, ENCRYPT_TYPE, COMPRESS_LEVEL, WITHOUT_LOG, BEGIN_LSN, END_LSN, BKP_NUM, CUMULATIVE FROM v$backupset`
	// backupSetPieceQuery   = `SELECT DEVICE_TYPE, BACKUP_ID, BACKUPNAME, BACKUPPATH, BKP_NTH, FILE_NAME, BKP_LEN FROM V$BACKUPSET_BKP`
	// backupSetArchiveQuery = `SELECT DEVICE_TYPE, BACKUP_ID, BACKUPNAME, BACKUPPATH, FILE_SEQ, FILE_NAME, FILE_LEN, BEGIN_LSN, END_LSN, CREATE_TIME, CLOSE_TIME FROM V$BACKUPSET_ARCH WHERE FILE_NAME != ''`
	// backupHistoryQuery fetches the path and start/end timestamps of every
	// recorded backup run; it is the only query currently in use.
	backupHistoryQuery = `select PATH,START_TIME,END_TIME from V$BACKUP_HISTORY`
)

var (
	// backupSetInfoDesc is an "info"-style metric: the value is always 1 and
	// the backup set's static attributes are carried as labels.
	backupSetInfoDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, backupSubsystem, "set_info"),
		"Information about backup sets.",
		[]string{"device_type", "backup_id", "backup_name", "backup_path", "type", "level", "range", "object_name", "base_name", "encrypt_type", "compress_level", "without_log", "cumulative"}, nil,
	)
	// backupSetTimeDesc reports when each backup set was taken, as a Unix timestamp.
	backupSetTimeDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, backupSubsystem, "set_backup_time_seconds"),
		"The timestamp of the backup set.",
		[]string{"backup_id"}, nil,
	)
	// backupSetLsnDesc reports begin/end LSNs per backup set; the "type" label
	// distinguishes "begin" from "end".
	backupSetLsnDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, backupSubsystem, "set_lsn"),
		"LSN information for the backup set.",
		[]string{"backup_id", "type"}, nil,
	)
	// backupSetPieceCountDesc counts the pieces making up each backup set.
	backupSetPieceCountDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, backupSubsystem, "set_piece_count"),
		"Number of backup pieces in the backup set.",
		[]string{"backup_id"}, nil,
	)
	// backupSetPieceSizeBytesDesc reports the size of each individual backup piece.
	backupSetPieceSizeBytesDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, backupSubsystem, "set_piece_size_bytes"),
		"Size of each backup piece.",
		[]string{"backup_id", "backup_name", "backup_path", "piece_number", "file_name"}, nil,
	)
	// backupSetArchiveSizeBytesDesc reports the size of each archived log file
	// captured in the backup set.
	backupSetArchiveSizeBytesDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, backupSubsystem, "set_archive_size_bytes"),
		"Size of each archive file in the backup set.",
		[]string{"backup_id", "backup_name", "backup_path", "file_seq", "file_name"}, nil,
	)
	// backupHistoryStatusDesc = prometheus.NewDesc(
	// 	prometheus.BuildFQName(namespace, backupSubsystem, "history_status"),
	// 	"The status of a historical backup (1 for success, 0 for failure).",
	// 	[]string{"backup_id", "backup_range", "backup_type", "error", "start_time"}, nil,
	// )
	// backupHistoryCompletionTimeDesc carries formatted start/end times as
	// labels; the metric value is the elapsed seconds between them.
	// NOTE(review): start_time/end_time labels create a new series per backup
	// run — confirm the cardinality is acceptable for this deployment.
	backupHistoryCompletionTimeDesc = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, backupSubsystem, "history_completion_time_seconds"),
		"The completion time of a historical backup.",
		[]string{"path", "start_time", "end_time"}, nil,
	)
	// backupHistorySizeBytesDesc = prometheus.NewDesc(
	// 	prometheus.BuildFQName(namespace, backupSubsystem, "history_size_bytes"),
	// 	"The size of a historical backup.",
	// 	[]string{"backup_id", "backup_type", "direction"}, nil,
	// )
)

type Backup struct{}

func init() {
	RegisterScraper("backup", Backup{})
}

// Name returns the unique identifier under which this scraper is registered.
func (Backup) Name() string {
	const scraperName = "backup"
	return scraperName
}

// Scrape collects backup metrics. The backup-history query always runs; the
// backup-set scrapes (currently disabled) additionally require a configured
// backup path, which must first be registered with the server via
// sf_bakset_backup_dir_add.
//
// A failure in the history scrape is logged but does not abort the scrape;
// a failure registering the backup directory is returned to the caller.
func (Backup) Scrape(db *sql.DB, ch chan<- prometheus.Metric, backupPath string, logger *slog.Logger) error {
	// Best-effort: this query does not need the backup path function.
	if err := scrapeBackupHistory(db, ch, logger); err != nil {
		logger.Error("failed to scrape backup history", "error", err)
	}

	// The following queries require the backup path function.
	if backupPath == "" {
		logger.Info("backup_path is not configured, skipping backup set scrapes.")
		return nil
	}

	// The path is interpolated into the statement text, so escape embedded
	// single quotes (SQL-standard doubling) to keep the statement well-formed
	// and to avoid injection through a hostile configuration value.
	escapedPath := strings.ReplaceAll(backupPath, "'", "''")
	if _, err := db.Exec(fmt.Sprintf("sf_bakset_backup_dir_add('DISK','%s')", escapedPath)); err != nil {
		return fmt.Errorf("failed to execute sf_bakset_backup_dir_add: %w", err)
	}

	// if err := scrapeBackupSet(db, ch, logger); err != nil {
	// 	logger.Error("failed to scrape backup set", "error", err)
	// }
	// if err := scrapeBackupSetPiece(db, ch, logger); err != nil {
	// 	logger.Error("failed to scrape backup set piece", "error", err)
	// }
	// if err := scrapeBackupSetArchive(db, ch, logger); err != nil {
	// 	logger.Error("failed to scrape backup set archive", "error", err)
	// }

	return nil
}

// func scrapeBackupSet(db *sql.DB, ch chan<- prometheus.Metric, logger *slog.Logger) error {
// 	rows, err := db.Query(backupSetQuery)
// 	if err != nil {
// 		return err
// 	}
// 	defer rows.Close()

// 	for rows.Next() {
// 		var (
// 			deviceType, backupName, backupPath, objectName, baseName                                          string
// 			backupId, backupType, level, rangeNum, encryptType, compressLevel, withoutLog, bkpNum, cumulative int64
// 			beginLsn, endLsn                                                                                  int64
// 			backupTime                                                                                        time.Time
// 		)
// 		if err := rows.Scan(&deviceType, &backupId, &backupName, &backupPath, &backupType, &level, &rangeNum, &objectName, &baseName, &backupTime, &encryptType, &compressLevel, &withoutLog, &beginLsn, &endLsn, &bkpNum, &cumulative); err != nil {
// 			logger.Error("failed to scan backup set row", "error", err)
// 			continue
// 		}
// 		ch <- prometheus.MustNewConstMetric(backupSetInfoDesc, prometheus.GaugeValue, 1,
// 			deviceType, fmt.Sprint(backupId), backupName, backupPath, fmt.Sprint(backupType), fmt.Sprint(level), fmt.Sprint(rangeNum), objectName, baseName, fmt.Sprint(encryptType), fmt.Sprint(compressLevel), fmt.Sprint(withoutLog), fmt.Sprint(cumulative),
// 		)
// 		ch <- prometheus.MustNewConstMetric(backupSetTimeDesc, prometheus.GaugeValue, float64(backupTime.Unix()), fmt.Sprint(backupId))
// 		ch <- prometheus.MustNewConstMetric(backupSetLsnDesc, prometheus.GaugeValue, float64(beginLsn), fmt.Sprint(backupId), "begin")
// 		ch <- prometheus.MustNewConstMetric(backupSetLsnDesc, prometheus.GaugeValue, float64(endLsn), fmt.Sprint(backupId), "end")
// 		ch <- prometheus.MustNewConstMetric(backupSetPieceCountDesc, prometheus.GaugeValue, float64(bkpNum), fmt.Sprint(backupId))
// 	}
// 	return nil
// }

// func scrapeBackupSetPiece(db *sql.DB, ch chan<- prometheus.Metric, logger *slog.Logger) error {
// 	rows, err := db.Query(backupSetPieceQuery)
// 	if err != nil {
// 		return err
// 	}
// 	defer rows.Close()

// 	for rows.Next() {
// 		var (
// 			deviceType, backupName, backupPath, fileName string
// 			backupId, bkpNth                             int64
// 			bkpLen                                       float64
// 		)
// 		if err := rows.Scan(&deviceType, &backupId, &backupName, &backupPath, &bkpNth, &fileName, &bkpLen); err != nil {
// 			logger.Error("failed to scan backup set piece row", "error", err)
// 			continue
// 		}
// 		ch <- prometheus.MustNewConstMetric(backupSetPieceSizeBytesDesc, prometheus.GaugeValue, bkpLen,
// 			fmt.Sprint(backupId), backupName, backupPath, fmt.Sprint(bkpNth), fileName,
// 		)
// 	}
// 	return nil
// }

// func scrapeBackupSetArchive(db *sql.DB, ch chan<- prometheus.Metric, logger *slog.Logger) error {
// 	rows, err := db.Query(backupSetArchiveQuery)
// 	if err != nil {
// 		return err
// 	}
// 	defer rows.Close()

// 	for rows.Next() {
// 		var (
// 			deviceType, backupName, backupPath, fileName string
// 			backupId, fileSeq                            int64
// 			fileLen                                      float64
// 			beginLsn, endLsn                             int64
// 			createTime, closeTime                        time.Time
// 		)
// 		if err := rows.Scan(&deviceType, &backupId, &backupName, &backupPath, &fileSeq, &fileName, &fileLen, &beginLsn, &endLsn, &createTime, &closeTime); err != nil {
// 			logger.Error("failed to scan backup set archive row", "error", err)
// 			continue
// 		}
// 		ch <- prometheus.MustNewConstMetric(backupSetArchiveSizeBytesDesc, prometheus.GaugeValue, fileLen,
// 			fmt.Sprint(backupId), backupName, backupPath, fmt.Sprint(fileSeq), fileName,
// 		)
// 	}
// 	return nil
// }

// scrapeBackupHistory emits one metric per row of V$BACKUP_HISTORY, with the
// backup path and RFC 3339-formatted start/end times as labels.
//
// NOTE(review): the metric is named history_completion_time_seconds but the
// exported value is the backup duration (end - start) in seconds — confirm
// which semantic is intended before relying on either.
func scrapeBackupHistory(db *sql.DB, ch chan<- prometheus.Metric, logger *slog.Logger) error {
	rows, err := db.Query(backupHistoryQuery)
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var (
			path               sql.NullString
			startTime, endTime sql.NullTime
		)
		if err := rows.Scan(&path, &startTime, &endTime); err != nil {
			logger.Error("failed to scan backup history row", "error", err)
			continue
		}
		// A NULL start or end time would yield a meaningless duration; skip.
		if !startTime.Valid || !endTime.Valid {
			continue
		}
		// Use path.String, not fmt.Sprint(path): printing the NullString
		// struct would render "{<path> true}" instead of the path itself.
		ch <- prometheus.MustNewConstMetric(backupHistoryCompletionTimeDesc, prometheus.GaugeValue,
			endTime.Time.Sub(startTime.Time).Seconds(),
			path.String, startTime.Time.Local().Format(time.RFC3339), endTime.Time.Local().Format(time.RFC3339))
	}
	// Surface any iteration error (e.g. a connection dropped mid-result-set).
	return rows.Err()
}
