package amplitude

import (
	"strings"
	"time"

	"github.com/BemiHQ/BemiDB/src/common"
)

const (
	// Step size of the hourly export loop in Sync; each iteration requests one window of this length.
	PAGINATION_TIME_INTERVAL = time.Hour
	AMPLITUDE_DATA_DELAY     = time.Hour // Amplitude data is available for export after an up to 2-hour delay (1 full hour + truncated). https://amplitude.com/docs/apis/analytics/export

	// Column used to resume incremental syncs from the last row written to Iceberg.
	CURSOR_COLUMN_NAME = "server_upload_time"

	// Multiplier handed to the Iceberg table writer for sizing.
	// NOTE(review): the original comment read "1 GB uncompressed data x 2 = ~90MB compressed data",
	// which doesn't add up arithmetically — confirm the intended meaning against NewIcebergTableWriter.
	COMPRESSION_FACTOR = 2 // 1 GB uncompressed data x 2 = ~90MB compressed data
)

// Syncer performs incremental exports of Amplitude event data into an Iceberg
// table stored on S3, using DuckDB for the table read/write operations.
type Syncer struct {
	Config       *Config              // connector configuration (API key, start date, destination schema)
	Amplitude    *Amplitude           // client whose Export method streams raw event data
	StorageS3    *common.StorageS3    // destination object storage for the Iceberg table
	DuckdbClient *common.DuckdbClient // engine used by the Iceberg table reader/writer
}

// NewSyncer wires up a Syncer for the given config, S3 storage, and DuckDB
// client, constructing its Amplitude API client from the same config.
func NewSyncer(config *Config, storageS3 *common.StorageS3, duckdbClient *common.DuckdbClient) *Syncer {
	syncer := &Syncer{
		Config:       config,
		StorageS3:    storageS3,
		DuckdbClient: duckdbClient,
	}
	syncer.Amplitude = NewAmplitude(config)
	return syncer
}

// Sync runs one incremental sync: it resolves the last-synced hour from the
// Iceberg table's cursor column, exports hourly windows from Amplitude in a
// background goroutine into a capped buffer, and drains that buffer into the
// Iceberg table on the calling goroutine.
func (syncer *Syncer) Sync() {
	common.SendAnonymousAnalytics(syncer.Config.CommonConfig, "syncer-amplitude-start", syncer.name())

	// Producer/consumer pair: the goroutine below writes JSON rows through
	// jsonQueueWriter into cappedBuffer; WriteToIceberg consumes the buffer.
	cappedBuffer := common.NewCappedBuffer(syncer.Config.CommonConfig, common.DEFAULT_CAPPED_BUFFER_SIZE)
	jsonQueueWriter := common.NewJsonQueueWriter(cappedBuffer)

	icebergSchemaTable := common.IcebergSchemaTable{Schema: syncer.Config.DestinationSchemaName, Table: EVENTS_TABLE_NAME}
	icebergTable := common.NewIcebergTable(syncer.Config.CommonConfig, syncer.StorageS3, syncer.DuckdbClient, icebergSchemaTable)
	cursorValue := icebergTable.LastCursorValue(CURSOR_COLUMN_NAME)

	// Resume from the hour after the last stored cursor value, or from the
	// configured start date on the first run (empty cursor).
	lastSyncedTime := syncer.Config.StartDate
	if cursorValue.StringValue != "" {
		lastSyncedTime = common.StringMsToUtcTime(cursorValue.StringValue).Truncate(time.Hour).Add(time.Hour) // add 1 hour to ensure we don't overlap
	}
	now := time.Now().UTC()
	// Stop short of "now" to respect Amplitude's export availability delay.
	endOfSyncWindow := now.Add(-AMPLITUDE_DATA_DELAY).Truncate(time.Hour)
	common.LogInfo(syncer.Config.CommonConfig, "Starting incremental sync from", lastSyncedTime, "to", endOfSyncWindow)

	// Copy from Amplitude to cappedBuffer in a separate goroutine in parallel
	go func() {
		for t := lastSyncedTime; t.Before(endOfSyncWindow); t = t.Add(PAGINATION_TIME_INTERVAL) {
			startTime := t
			endTime := t.Add(PAGINATION_TIME_INTERVAL - time.Hour) // -1 hour to ensure we don't overlap (Amplitude uses an inclusive end time)

			err := syncer.Amplitude.Export(jsonQueueWriter, startTime, endTime)
			if err != nil {
				// Amplitude reports an error when no data exists for the window
				// yet; treat that as "caught up" and let a later run retry.
				if strings.Contains(err.Error(), "Raw data files were not found.") || strings.Contains(err.Error(), "404: Not Found") {
					common.LogInfo(syncer.Config.CommonConfig, "No data found for the time range", startTime, "to", endTime, "- will retry later.")
					break
				}
			}
			// NOTE(review): a panic here fires inside this goroutine — it still
			// crashes the process, but jsonQueueWriter is never closed on that path.
			common.PanicIfError(syncer.Config.CommonConfig, err)
		}
		common.LogInfo(syncer.Config.CommonConfig, "Finished exporting data from Amplitude.")
		// Closing the writer signals end-of-stream to the Iceberg consumer.
		jsonQueueWriter.Close()
	}()

	// Presumably blocks until the producer closes jsonQueueWriter — confirm
	// against AppendFromJsonCappedBuffer in the common package.
	syncer.WriteToIceberg(icebergTable, cursorValue, cappedBuffer)

	common.SendAnonymousAnalytics(syncer.Config.CommonConfig, "syncer-amplitude-finish", syncer.name())
}

// WriteToIceberg drains the JSON rows buffered in cappedBuffer into the given
// Iceberg table using the events schema, forwarding cursorValue so the writer
// can continue from the last synced position.
func (syncer *Syncer) WriteToIceberg(icebergTable *common.IcebergTable, cursorValue common.CursorValue, cappedBuffer *common.CappedBuffer) {
	writer := common.NewIcebergTableWriter(
		syncer.Config.CommonConfig,
		syncer.StorageS3,
		syncer.DuckdbClient,
		icebergTable,
		EventsIcebergSchemaColumns(syncer.Config.CommonConfig),
		COMPRESSION_FACTOR,
	)
	writer.AppendFromJsonCappedBuffer(cursorValue, cappedBuffer)
}

// name returns a short anonymized identifier for this syncer, used only for
// analytics: the first 5 characters of the API key followed by "...".
func (syncer *Syncer) name() string {
	apiKey := syncer.Config.ApiKey
	// Guard against short or empty keys: apiKey[:5] panics with
	// "slice bounds out of range" when len(apiKey) < 5.
	if len(apiKey) > 5 {
		apiKey = apiKey[:5]
	}
	return apiKey + "..."
}
