package endpoint

import (
	"context"
	"fmt"
	"github.com/ClickHouse/clickhouse-go/v2"
	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
	dgcoll "github.com/darwinOrg/go-common/collection"
	"github.com/darwinOrg/go-monitor"
	"github.com/go-mysql-org/go-mysql/canal"
	"github.com/go-mysql-org/go-mysql/mysql"
	"github.com/juju/errors"
	"myc/global"
	"myc/metrics"
	"myc/model"
	"myc/util/alarm"
	"myc/util/logs"
	"myc/util/stringutil"
	"strings"
	"time"
)

const (
	// clickhousePingErrorCounter is the monitor counter name bumped
	// whenever a ClickHouse ping fails (see Ping).
	clickhousePingErrorCounter = "clickhouse_ping_error"
)

var (
	// clickhousePingErrorLabelMap is the fixed label set attached to the
	// ping-error counter.
	clickhousePingErrorLabelMap = map[string]string{"error": clickhousePingErrorCounter}
)

// ClickHouseEndpoint replicates MySQL binlog row events into ClickHouse
// over the native protocol (clickhouse-go v2). It is constructed by
// newClickHouseEndpoint and is not usable until Connect has been called.
type ClickHouseEndpoint struct {
	hosts    []string // ClickHouse addresses, split from the configured comma-separated list
	username string
	password string
	conn     driver.Conn // established by Connect; nil until then
}

// newClickHouseEndpoint builds an endpoint from the global configuration.
// ClickHouseAddr is expected to be a comma-separated host list; the
// connection itself is opened later by Connect.
func newClickHouseEndpoint() *ClickHouseEndpoint {
	cfg := global.Cfg()
	return &ClickHouseEndpoint{
		hosts:    strings.Split(cfg.ClickHouseAddr, ","),
		username: cfg.ClickHouseUsername,
		password: cfg.ClickHousePassword,
	}
}

// Name identifies this endpoint type; it is used in log lines and for
// endpoint selection.
func (s *ClickHouseEndpoint) Name() string {
	const endpointName = "clickhouse"
	return endpointName
}

// Connect opens a native-protocol connection to the configured ClickHouse
// hosts and stores it on the endpoint. Opening does not necessarily verify
// liveness; use Ping for that.
func (s *ClickHouseEndpoint) Connect() error {
	opts := &clickhouse.Options{
		Addr: s.hosts,
		Auth: clickhouse.Auth{
			//Database: "default",
			Username: s.username,
			Password: s.password,
		},
		//TLS: &tls.Config{
		//	InsecureSkipVerify: true,
		//},
		Settings: clickhouse.Settings{
			"max_execution_time": 60,
		},
		DialTimeout: time.Second * 30,
		Compression: &clickhouse.Compression{
			Method: clickhouse.CompressionLZ4,
		},
		// NOTE(review): Debug is enabled here, which is verbose — confirm
		// this is intended for production deployments.
		Debug:                true,
		BlockBufferSize:      10,
		MaxCompressionBuffer: 10240,
		ClientInfo: clickhouse.ClientInfo{
			Products: []struct {
				Name    string
				Version string
			}{
				{Name: "myc", Version: "0.1"},
			},
		},
	}

	conn, err := clickhouse.Open(opts)
	if err != nil {
		return err
	}

	s.conn = conn
	return nil
}

// Ping checks connectivity to ClickHouse. On failure it bumps the
// clickhouse_ping_error counter (best-effort) and returns the underlying
// ping error so callers can see the real cause.
func (s *ClickHouseEndpoint) Ping() error {
	err := s.conn.Ping(context.Background())
	if err == nil {
		return nil
	}

	// Best-effort metrics bump; a monitoring failure must not mask the
	// actual ping error, so it is only logged.
	if merr := monitor.IncCounter(clickhousePingErrorCounter, clickhousePingErrorLabelMap); merr != nil {
		logs.Warnf("monitor.IncCounter(%s) error: %v", clickhousePingErrorCounter, merr)
	}

	// Previously this returned errors.New("ssx"), a placeholder that
	// discarded the real failure; annotate and return the cause instead.
	return errors.Annotate(err, "clickhouse ping failed")
}

// Consume writes a batch of binlog row events to ClickHouse, grouped by
// destination table ("schema.table"). Rows whose column count no longer
// matches the rule's table schema are dropped with a warning (empty group
// key). Updates are applied as inserts followed by an OPTIMIZE TABLE
// (best-effort), relying on the table's merge semantics to collapse
// duplicates. Returns the first batch error encountered.
func (s *ClickHouseEndpoint) Consume(_ mysql.Position, rows []*model.RowRequest) error {
	grouped := dgcoll.GroupBy(rows, func(row *model.RowRequest) string {
		rule, _ := global.RuleIns(row.RuleKey)
		if rule.TableColumnSize != len(row.Row) {
			logs.Warnf("%s schema mismatching", row.RuleKey)
			return ""
		}
		return rule.ClickHouseSchema + "." + rule.ClickHouseTable
	})

	for table, tableRows := range grouped {
		// Empty key marks rows rejected by the schema check above.
		if table == "" {
			continue
		}

		byAction := func(action string) []*model.RowRequest {
			return dgcoll.FilterList(tableRows, func(row *model.RowRequest) bool {
				return row.Action == action
			})
		}

		if inserts := byAction(canal.InsertAction); len(inserts) > 0 {
			if err := s.batchInsert(table, inserts); err != nil {
				return err
			}
		}

		if updates := byAction(canal.UpdateAction); len(updates) > 0 {
			if err := s.batchInsert(table, updates); err != nil {
				return err
			}
			// Best-effort merge so the re-inserted versions win.
			_ = s.conn.Exec(context.Background(), "optimize table "+table)
		}

		if deletes := byAction(canal.DeleteAction); len(deletes) > 0 {
			if err := s.batchDelete(table, deletes); err != nil {
				return err
			}
		}
	}

	logs.Infof("%s 处理完成 %d 条数据", s.Name(), len(rows))
	return nil
}

// Stock bulk-loads rows during initial stock synchronization, grouped by
// destination table. Unlike Consume it does not return an error: on the
// first batch failure it raises an emergency alarm and returns the count
// of rows from the groups fully processed so far. Rows failing the schema
// check are dropped with a warning.
func (s *ClickHouseEndpoint) Stock(rows []*model.RowRequest) int64 {
	grouped := dgcoll.GroupBy(rows, func(row *model.RowRequest) string {
		rule, _ := global.RuleIns(row.RuleKey)
		if rule.TableColumnSize != len(row.Row) {
			logs.Warnf("%s schema mismatching", row.RuleKey)
			return ""
		}
		return rule.ClickHouseSchema + "." + rule.ClickHouseTable
	})

	var count int64
	for table, tableRows := range grouped {
		// Empty key marks rows rejected by the schema check above.
		if table == "" {
			continue
		}

		byAction := func(action string) []*model.RowRequest {
			return dgcoll.FilterList(tableRows, func(row *model.RowRequest) bool {
				return row.Action == action
			})
		}

		if inserts := byAction(canal.InsertAction); len(inserts) > 0 {
			if err := s.batchInsert(table, inserts); err != nil {
				alarm.EmergencyAlarm(err)
				return count
			}
		}

		if updates := byAction(canal.UpdateAction); len(updates) > 0 {
			if err := s.batchInsert(table, updates); err != nil {
				alarm.EmergencyAlarm(err)
				return count
			}
			// Best-effort merge so the re-inserted versions win.
			_ = s.conn.Exec(context.Background(), "optimize table "+table)
		}

		if deletes := byAction(canal.DeleteAction); len(deletes) > 0 {
			if err := s.batchDelete(table, deletes); err != nil {
				alarm.EmergencyAlarm(err)
				return count
			}
		}

		// Count a group only once every action kind in it succeeded.
		count += int64(len(tableRows))
	}

	logs.Infof("%s 处理完成 %d 条数据", s.Name(), len(rows))

	return count
}

// Close releases the underlying ClickHouse connection. The close error is
// intentionally ignored: this runs at shutdown, where there is no recovery path.
func (s *ClickHouseEndpoint) Close() {
	_ = s.conn.Close()
}

// batchInsert writes rows into the given ClickHouse table (group is
// "schema.table") via a prepared batch, the efficient bulk-insert path in
// clickhouse-go v2. All rows must share the rule of rows[0] — callers
// guarantee this by grouping on the rule's destination table first.
// Every failure raises an emergency alarm and is returned to the caller.
func (s *ClickHouseEndpoint) batchInsert(group string, rows []*model.RowRequest) error {
	rule, _ := global.RuleIns(rows[0].RuleKey)

	// Column order comes from map iteration and is therefore arbitrary,
	// but it is fixed for the duration of this call, keeping the INSERT
	// column list and the appended values aligned.
	columns := make([]string, 0, len(rule.PaddingMap))
	for columnName := range rule.PaddingMap {
		columns = append(columns, columnName)
	}

	execSql := fmt.Sprintf("INSERT INTO %s (%s)", group, strings.Join(columns, ", "))
	batch, err := s.conn.PrepareBatch(context.Background(), execSql)
	if err != nil {
		logs.Errorf("PrepareBatch[%s] error: %v", execSql, err)
		alarm.EmergencyAlarm(err)
		return err
	}

	for _, row := range rows {
		metrics.UpdateActionNum(row.Action, row.RuleKey)
		kvm := rowMap(row, rule, false)

		values := make([]any, 0, len(columns))
		for _, columnName := range columns {
			// NOTE(review): a column missing from kvm shortens values and
			// would misalign them with the column list — presumably rowMap
			// always yields every padding column; verify.
			if value, ok := kvm[columnName]; ok {
				values = append(values, value)
			}
		}

		if err = batch.Append(values...); err != nil {
			logs.Errorf("batch.Append[%s] error: %v", execSql, err)
			alarm.EmergencyAlarm(err)
			return err
		}
	}

	if err = batch.Send(); err != nil {
		logs.Errorf("batch.Send[%s] error: %v", execSql, err)
		alarm.EmergencyAlarm(err)
		return err
	}

	return nil
}

// batchUpdate applies row updates one statement at a time using an
// upsert-style INSERT built from the rule's padding columns.
//
// NOTE(review): this function is not called anywhere in this file —
// Consume and Stock handle updates via batchInsert + OPTIMIZE TABLE
// instead. Additionally, "ON DUPLICATE KEY UPDATE" is MySQL syntax, not
// ClickHouse SQL, so this statement presumably fails if executed against
// ClickHouse — confirm before wiring it in, or remove it.
func (s *ClickHouseEndpoint) batchUpdate(group string, rows []*model.RowRequest) error {
	rule, _ := global.RuleIns(rows[0].RuleKey)
	// Resolve primary-key column names from the rule's table metadata.
	pkColumns := make([]string, len(rule.TableInfo.PKColumns))
	for i := 0; i < len(rule.TableInfo.PKColumns); i++ {
		index := rule.TableInfo.PKColumns[i]
		pkColumns[i] = rule.TableInfo.Columns[index].Name
	}

	// Column order comes from map iteration — arbitrary, but fixed for
	// this call, so the SQL column list and the bound values stay aligned.
	var columns []string
	for columnName, _ := range rule.PaddingMap {
		columns = append(columns, columnName)
	}
	marks := make([]string, len(columns))
	for i := 0; i < len(columns); i++ {
		marks[i] = "?"
	}
	// Only non-PK columns appear in the duplicate-update clause.
	notPKColumns := dgcoll.Remove(columns, pkColumns)
	duplicateUpdates := make([]string, len(notPKColumns))
	for i, notPKColumn := range notPKColumns {
		duplicateUpdates[i] = fmt.Sprintf("%s = ?", notPKColumn)
	}
	execSql := fmt.Sprintf("INSERT INTO %s (%s) VALUES(%s) (ON DUPLICATE KEY UPDATE %s)", group, strings.Join(columns, ", "), strings.Join(marks, ", "), strings.Join(duplicateUpdates, ", "))

	for _, row := range rows {
		metrics.UpdateActionNum(row.Action, row.RuleKey)
		kvm := rowMap(row, rule, false)
		var values []any

		// Bind values for the VALUES(...) placeholders.
		for _, columnName := range columns {
			if value, ok := kvm[columnName]; ok {
				values = append(values, value)
			}
		}

		// Then bind the same non-PK values again for the UPDATE clause.
		for _, columnName := range notPKColumns {
			if value, ok := kvm[columnName]; ok {
				values = append(values, value)
			}
		}

		err := s.conn.Exec(context.Background(), execSql, values...)
		if err != nil {
			logs.Errorf("conn.Exec[%s] error: %v", execSql, err)
			alarm.EmergencyAlarm(err)
			return err
		}
	}

	return nil
}

// batchDelete removes rows from the given ClickHouse table. Composite-key
// tables get one DELETE per row with an AND-ed per-column predicate;
// single-key tables use a single DELETE ... IN (...) over all rows.
// Failures raise an emergency alarm and are returned.
func (s *ClickHouseEndpoint) batchDelete(group string, rows []*model.RowRequest) error {
	rule, _ := global.RuleIns(rows[0].RuleKey)

	if rule.IsCompositeKey {
		conditions := make([]string, len(rule.TableInfo.PKColumns))
		for i, index := range rule.TableInfo.PKColumns {
			// Fix: stringify the column NAME, not the whole column struct —
			// the original ToString(Columns[index]) rendered the struct and
			// produced garbage identifiers in the WHERE clause (the
			// single-key branch below already used .Name).
			key := stringutil.ToString(rule.TableInfo.Columns[index].Name)
			conditions[i] = fmt.Sprintf("%s = ?", key)
		}
		execSql := "DELETE FROM " + group + " WHERE " + strings.Join(conditions, " and ")

		for _, row := range rows {
			kvm := rowMap(row, rule, false)
			values := make([]any, len(rule.TableInfo.PKColumns))

			for i, index := range rule.TableInfo.PKColumns {
				key := stringutil.ToString(rule.TableInfo.Columns[index].Name)
				values[i] = kvm[key]
			}

			err := s.conn.Exec(context.Background(), execSql, values...)
			if err != nil {
				logs.Errorf("conn.Exec[%s] error: %v", execSql, err)
				alarm.EmergencyAlarm(err)
				return err
			}
		}
	} else {
		// NOTE(review): this assumes the table's first column is the key
		// (Columns[0]); presumably that holds for all single-key rules —
		// verify, or use PKColumns[0] instead.
		key := stringutil.ToString(rule.TableInfo.Columns[0].Name)
		marks := make([]string, len(rows))
		for i := 0; i < len(rows); i++ {
			marks[i] = "?"
		}
		execSql := fmt.Sprintf("DELETE FROM %s WHERE %s IN (%s)", group, key, strings.Join(marks, ", "))

		values := make([]any, len(rows))
		for i, row := range rows {
			kvm := rowMap(row, rule, false)
			values[i] = kvm[key]
		}

		err := s.conn.Exec(context.Background(), execSql, values...)
		if err != nil {
			logs.Errorf("conn.Exec[%s] error: %v", execSql, err)
			alarm.EmergencyAlarm(err)
			return err
		}
	}

	return nil
}
