package db

import (
	"context"
	"fmt"
	"sync/atomic"

	"engine/db/driver"
	engMysql "engine/db/driver/mysql"

	"gitee.com/ameise84/e3lock"
	"gitee.com/ameise84/e3pool/go_pool"
	"gitee.com/ameise84/e3pool/obj_pool"
	time "gitee.com/ameise84/e3time"
	"gitee.com/ameise84/e3utils/cache"
	"gitee.com/ameise84/e3utils/errors"
	jsoniter "github.com/json-iterator/go"
	"github.com/redis/go-redis/v9"
	"gorm.io/gorm"
)

// opType tags a queued dataMeta with the operation to perform at flush time.
type opType int

const (
	opAdd   opType = 0 // upsert: re-read the value from redis, then gorm Save
	opDel   opType = 1 // delete: remove the row from the SQL store
	saveNum        = 256 // flush threshold and initial capacity of a dataMap
)

// _gDumpDataPool recycles dataMap buffers between flush cycles so a fresh
// saveNum-sized map is not allocated on every swap in toSave.
var _gDumpDataPool obj_pool.Pool[*dataMap]

func init() {
	_gDumpDataPool = obj_pool.NewPool[*dataMap](func() *dataMap {
		return &dataMap{
			make(map[string]*dataMeta, saveNum),
		}
	})
}

// dataMeta describes one pending write-behind operation.
type dataMeta struct {
	op    opType // opAdd or opDel; the latest queued intent wins (see Save/Del)
	f     Factory // builds an empty TableMeta to Scan into / Delete with
	table string // redis hash key; doSave also uses it as the gorm table name
	id    string // redis hash field (row id)
	r     *redis.StringCmd // pending HGet result, populated during doSave's pipeline pass
}

// dataMap buffers pending operations keyed by key+field (see Save/Del).
type dataMap struct {
	Mp map[string]*dataMeta
}

// E3LogMarshall renders the buffered entries as JSON for log output;
// marshalling errors are deliberately ignored.
func (ts *dataMap) E3LogMarshall() string {
	serialized, _ := jsoniter.MarshalToString(ts.Mp)
	return serialized
}

// newDumper builds a dumper with a pooled write buffer, a 24-hour
// table-schema cache, and a two-goroutine non-blocking runner.
func newDumper() *dumper {
	cacheOpts := cache.DefaultOptions().SetSize(0).SetTTL(24 * time.Hour)
	runnerOpts := go_pool.DefaultOptions().SetBlock(false).SetSize(2)
	d := &dumper{
		writeMap:   _gDumpDataPool.Get(),
		tableCache: cache.New[string](cacheOpts),
	}
	d.runner = go_pool.NewGoRunner(d, "db dumper", runnerOpts)
	return d
}

// dumper batches redis-backed rows and asynchronously persists them to the
// SQL store via gorm (write-behind cache flush).
type dumper struct {
	kind       string // driver kind passed to driver.Start / driver.Stop
	dr         *gorm.DB // SQL handle obtained from driver.Start
	redisCli   redis.UniversalClient // source of truth re-read during doSave
	tr         time.Timer // 10-second flush tick (see start / OnTimer)
	isInWrite  atomic.Bool // true while a doSave flush is in flight
	runner     go_pool.GoRunner // async pool that executes doSave
	mu         e3lock.SpinLock // guards writeMap swaps and mutation
	writeMap   *dataMap // pending operations, keyed by key+field
	tableCache cache.Cache[string] // tables already AutoMigrate'd (24h TTL)
}

// OnPanic is the runner's panic hook; it only logs the recovered error.
func (ts *dumper) OnPanic(err error) {
	_gDBLogger.Error("driver dumper").Err(err).Println()
}

// start opens the gorm driver for kind, stores the redis client, and arms
// the 10-second periodic flush tick.
func (ts *dumper) start(kind string, redisCli redis.UniversalClient) error {
	_gDBLogger.Info("start db dumper").Println()
	db, err := driver.Start(kind)
	if err != nil {
		_gDBLogger.Error("db start dumper").Err(err).Println()
		return err
	}
	ts.dr = db
	ts.kind = kind
	ts.redisCli = redisCli
	ts.tr = time.NewTick(ts, ts, 10*time.Second, time.InfiniteTimes)
	ts.isInWrite.Store(false)
	return nil
}

// stop pauses the flush tick, synchronously dispatches any remaining
// buffered writes, waits for the runner to drain, then releases the driver.
func (ts *dumper) stop() {
	_gDBLogger.Info("stop db dumper").Println()
	ts.tr.Pause()
	ts.mu.Lock()
	defer ts.mu.Unlock()
	// Final flush; calls toSave directly, bypassing the isInWrite CAS.
	if len(ts.writeMap.Mp) > 0 {
		ts.toSave()
	}
	// Block until every dispatched doSave has finished before tearing down.
	ts.runner.Wait()
	ts.dr = nil
	driver.Stop(ts.kind)
}

// OnTimer is the periodic tick callback: flush whatever has accumulated.
func (ts *dumper) OnTimer(_ time.Timer, _ time.Time) {
	ts.mu.Lock()
	defer ts.mu.Unlock()
	if len(ts.writeMap.Mp) == 0 {
		return
	}
	ts.readyToWrite()
}

// Save queues an upsert for each (key, field) pair. The write itself happens
// asynchronously: doSave re-reads the current value from redis and persists
// it through gorm.
func (ts *dumper) Save(m TableMeta, key string, fields ...string) {
	ts.mu.Lock()
	defer ts.mu.Unlock()
	for _, field := range fields {
		k := key + field
		if v, ok := ts.writeMap.Mp[k]; ok {
			// Already queued (possibly as a delete): the latest intent wins.
			v.op = opAdd
			continue
		}
		// First touch of this table within the cache TTL: ensure the schema
		// exists; the AutoMigrate error is deliberately ignored.
		if _, ok := ts.tableCache.LoadAndStore(m.TableName(), struct{}{}); !ok {
			_ = ts.dr.Table(m.TableName()).AutoMigrate(m)
		}
		f, ok := gFactoryMap[m.BaseTableName()]
		if !ok {
			// NOTE(review): returning here skips the remaining fields and the
			// flush check below — confirm a missing factory should abort the batch.
			_gDBLogger.Error(fmt.Sprintf("db dump not found factory:%s", key)).Println()
			return
		}
		// NOTE(review): `table` is set to the redis key, which doSave also uses
		// as the gorm table name — verify key always equals the table name.
		ts.writeMap.Mp[k] = &dataMeta{op: opAdd, f: f, table: key, id: field}
	}

	// Flush early once the buffer reaches the batch size.
	if len(ts.writeMap.Mp) >= saveNum {
		ts.readyToWrite()
	}
}

// Load reads the row with the given id from the SQL store into m, migrating
// the table schema on first access. A missing row is reported as redis.Nil
// and a missing table as ErrTableNotFound; other errors pass through.
func (ts *dumper) Load(m TableMeta, id string) error {
	name := m.TableName()
	if _, seen := ts.tableCache.LoadAndStore(name, struct{}{}); !seen {
		_ = ts.dr.Table(name).AutoMigrate(m)
	}
	err := ts.dr.Table(name).Where("id = ?", id).First(m).Error
	switch {
	case err == nil:
		return nil
	case errors.Is(err, gorm.ErrRecordNotFound):
		return redis.Nil
	case engMysql.IsErrors(err, engMysql.TableNotFind1146):
		return ErrTableNotFound
	default:
		return err
	}
}

// Del queues a delete for each (key, field) pair; base selects the factory
// used to build the model handed to gorm's Delete in doSave.
func (ts *dumper) Del(base, key string, fields ...string) {
	ts.mu.Lock()
	defer ts.mu.Unlock()
	for _, field := range fields {
		k := key + field
		if x, ok := ts.writeMap.Mp[k]; ok {
			// Already queued (possibly as an add): the latest intent wins.
			x.op = opDel
			continue
		}
		f, ok := gFactoryMap[base]
		if !ok {
			// NOTE(review): returning here skips the remaining fields and the
			// flush check below — confirm that is intended (mirrors Save).
			_gDBLogger.Error(fmt.Sprintf("db dump not found factory:%s", key)).Println()
			return
		}
		ts.writeMap.Mp[k] = &dataMeta{op: opDel, f: f, table: key, id: field}
	}
	// Flush early once the buffer reaches the batch size.
	if len(ts.writeMap.Mp) >= saveNum {
		ts.readyToWrite()
	}
}

// readyToWrite starts an async flush unless one is already in flight.
// Caller must hold ts.mu.
func (ts *dumper) readyToWrite() {
	if !ts.isInWrite.CompareAndSwap(false, true) {
		return
	}
	ts.toSave()
}

// toSave swaps the pending buffer for a fresh pooled map and hands the full
// one to the async runner. Caller must hold ts.mu.
//
// Fixes: on AsyncRun failure the previous code never reset isInWrite, so the
// CAS in readyToWrite could never succeed again and flushing stalled forever;
// it also logged the freshly-pooled (empty) map instead of the failed batch.
func (ts *dumper) toSave() {
	full := ts.writeMap
	ts.writeMap = _gDumpDataPool.Get()
	if err := ts.runner.AsyncRun(ts.doSave, full); err != nil {
		// Restore the pending data so the next tick retries it, and recycle
		// the fresh map we just took from the pool.
		fresh := ts.writeMap
		ts.writeMap = full
		// Log the batch that failed to dispatch.
		_gDBLogger.Error("dump to save").Err(err).Object(full).Println()
		// doSave will never run for this batch, so release the in-write flag
		// here; otherwise isInWrite stays true permanently.
		ts.isInWrite.Store(false)
		clear(fresh.Mp)
		_gDumpDataPool.Put(fresh)
	}
}

// doSave is the async flush worker. It re-reads every queued opAdd entry
// from redis in a single pipeline, then applies adds (gorm Save) and deletes
// (gorm Delete) against the SQL store. Runs on the go_pool runner.
//
// Fixes: the pipeline-error path previously returned early, leaking readMap
// (never cleared or returned to the pool) and leaving isInWrite stuck at
// true, which disabled all future flushes. It also treated redis.Nil from
// Exec as fatal, so one missing hash field aborted the whole batch —
// go-redis pipelines surface per-command misses that way; they are handled
// per entry via Scan below.
func (ts *dumper) doSave(args ...any) {
	readMap := args[0].(*dataMap)
	// Always release the in-write flag and recycle the map, even on an
	// early return.
	defer func() {
		ts.isInWrite.Store(false)
		clear(readMap.Mp)
		_gDumpDataPool.Put(readMap)
	}()

	ctx := context.Background()
	pip := ts.redisCli.Pipeline()
	for _, v := range readMap.Mp {
		if v.op == opAdd {
			// Queue the read; the result is resolved after Exec.
			v.r = pip.HGet(ctx, v.table, v.id)
		}
	}
	// redis.Nil from Exec means some HGet missed its key/field — not a
	// transport failure; any other error aborts the batch.
	if _, err := pip.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) {
		_gDBLogger.Error("db dump pipeline exec").Err(err).Println()
		return
	}
	for _, v := range readMap.Mp {
		m := v.f()
		switch v.op {
		case opAdd:
			if err := v.r.Scan(m); err != nil {
				_gDBLogger.Error("db dump add", false).Err(err).Println()
				continue
			}
			if rr := ts.dr.Table(v.table).Save(m); rr.Error != nil {
				_gDBLogger.ErrorPrintf("db dump add:%+v err:%v", m, rr.Error)
			}
		case opDel:
			ts.dr.Table(v.table).Delete(m, v.id)
		default:
		}
	}
}
