package e2e

import (
	"context"
	"fmt"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/PeerDB-io/peerdb/flow/internal"
)

// attachSchemaSuffix qualifies tableName with the suite's per-run test schema
// (e2e_test_<suffix>) so concurrent test runs never collide.
func (s PeerFlowE2ETestSuiteS3) attachSchemaSuffix(tableName string) string {
	return "e2e_test_" + s.suffix + "." + tableName
}

// attachSuffix appends the suite's unique run suffix to input, keeping flow
// job names distinct across parallel test executions.
func (s PeerFlowE2ETestSuiteS3) attachSuffix(input string) string {
	return input + "_" + s.suffix
}

// Test_Simple runs a basic CDC flow from Postgres to S3: 20 inserted rows with
// MaxBatchSize=5 should produce exactly 4 batch files in the bucket and 4
// finalized batches in peerdb_stats.
func (s PeerFlowE2ETestSuiteS3) Test_Simple() {
	temporalClient := NewTemporalClient(s.t)

	sourceTable := s.attachSchemaSuffix("test_simple_flow_s3")
	destTable := "peerdb_test_s3.test_simple_flow_s3"
	jobName := s.attachSuffix("test_simple_flow_s3")

	// Create the source table on the Postgres side.
	_, err := s.conn.Conn().Exec(s.t.Context(), fmt.Sprintf(`
		CREATE TABLE %s (
			id SERIAL PRIMARY KEY,
			key TEXT NOT NULL,
			value TEXT NOT NULL
		);
	`, sourceTable))
	require.NoError(s.t, err)

	cfgGen := FlowConnectionGenerationConfig{
		FlowJobName:      jobName,
		TableNameMapping: map[string]string{sourceTable: destTable},
		Destination:      s.Peer().Name,
	}

	flowCfg := cfgGen.GenerateFlowConnectionConfigs(s)
	// 20 rows / batch size 5 => 4 CDC batches expected below.
	flowCfg.MaxBatchSize = 5

	workflowEnv := ExecutePeerflow(s.t, temporalClient, flowCfg)
	SetupCDCFlowStatusQuery(s.t, workflowEnv, flowCfg)

	// Insert 20 rows into the source table.
	for row := 0; row < 20; row++ {
		testKey := fmt.Sprintf("test_key_%d", row)
		testValue := fmt.Sprintf("test_value_%d", row)
		_, insertErr := s.conn.Conn().Exec(s.t.Context(),
			fmt.Sprintf(`INSERT INTO %s (key, value) VALUES ($1, $2)`, sourceTable), testKey, testValue)
		EnvNoError(s.t, workflowEnv, insertErr)
	}

	// Poll S3 until all 4 batch files have landed.
	EnvWaitFor(s.t, workflowEnv, 2*time.Minute, "waiting for blobs", func() bool {
		listCtx, cancel := context.WithTimeout(s.t.Context(), 25*time.Second)
		defer cancel()
		files, listErr := s.s3Helper.ListAllFiles(listCtx, jobName)
		EnvNoError(s.t, workflowEnv, listErr)
		s.t.Logf("Files in %s: %d", jobName, len(files))
		return len(files) == 4
	})

	catalogPool, err := internal.GetCatalogConnectionPoolFromEnv(s.t.Context())
	require.NoError(s.t, err)
	EnvWaitFor(s.t, workflowEnv, time.Minute, "waiting for cdc batch completion", func() bool {
		// s3 normalize is nop, so check peerdb_stats directly that batch finalized
		var finalized int64
		require.NoError(s.t, catalogPool.QueryRow(s.t.Context(),
			"select count(*) from peerdb_stats.cdc_batches where flow_name = $1 and end_time is not null",
			jobName,
		).Scan(&finalized))
		return finalized == 4
	})

	workflowEnv.Cancel(s.t.Context())
	RequireEnvCanceled(s.t, workflowEnv)
}

// Test_OriginMetadata runs a CDC flow to S3 with
// PEERDB_ORIGIN_METADATA_AS_DESTINATION_COLUMN enabled: 20 inserted rows with
// MaxBatchSize=5 should yield 4 batch files in the bucket.
func (s PeerFlowE2ETestSuiteS3) Test_OriginMetadata() {
	temporalClient := NewTemporalClient(s.t)

	sourceTable := s.attachSchemaSuffix("origin_metadata")
	destTable := "peerdb_test_s3.origin_metadata"
	jobName := s.attachSuffix("origin_metadata")

	// Create the source table on the Postgres side.
	_, err := s.conn.Conn().Exec(s.t.Context(), fmt.Sprintf(`
		CREATE TABLE %s (
			id SERIAL PRIMARY KEY,
			val TEXT NOT NULL
		);
	`, sourceTable))
	require.NoError(s.t, err)

	cfgGen := FlowConnectionGenerationConfig{
		FlowJobName:      jobName,
		TableNameMapping: map[string]string{sourceTable: destTable},
		Destination:      s.Peer().Name,
	}

	flowCfg := cfgGen.GenerateFlowConnectionConfigs(s)
	// 20 rows / batch size 5 => 4 CDC batches expected below.
	flowCfg.MaxBatchSize = 5
	flowCfg.Env = map[string]string{"PEERDB_ORIGIN_METADATA_AS_DESTINATION_COLUMN": "true"}

	workflowEnv := ExecutePeerflow(s.t, temporalClient, flowCfg)
	SetupCDCFlowStatusQuery(s.t, workflowEnv, flowCfg)

	// Insert 20 rows into the source table.
	for row := 0; row < 20; row++ {
		_, insertErr := s.conn.Conn().Exec(s.t.Context(),
			fmt.Sprintf(`INSERT INTO %s (val) VALUES ($1)`, sourceTable), fmt.Sprintf("test_value_%d", row))
		EnvNoError(s.t, workflowEnv, insertErr)
	}

	// Poll S3 until all 4 batch files have landed.
	EnvWaitFor(s.t, workflowEnv, 2*time.Minute, "waiting for blobs", func() bool {
		listCtx, cancel := context.WithTimeout(s.t.Context(), 25*time.Second)
		defer cancel()
		files, listErr := s.s3Helper.ListAllFiles(listCtx, jobName)
		EnvNoError(s.t, workflowEnv, listErr)
		s.t.Logf("Files in %s: %d", jobName, len(files))
		return len(files) == 4
	})

	workflowEnv.Cancel(s.t.Context())
	RequireEnvCanceled(s.t, workflowEnv)
}
