package mysql

import (
	"errors"
	"fmt"
	"gitee.com/captials-team/ubdframe/src/domain/configstc"
	"gitee.com/captials-team/ubdframe/src/domain/dto"
	"gitee.com/captials-team/ubdframe/src/domain/dto/paginate"
	"gitee.com/captials-team/ubdframe/src/domain/interfaces"
	"gitee.com/captials-team/ubdframe/src/domain/models"
	mysqlClients "gitee.com/captials-team/ubdframe/src/infrastructure/clients/mysql"
	"gitee.com/captials-team/ubdframe/src/infrastructure/dao"
	"gitee.com/captials-team/ubdframe/src/pkg/logs"
	"gorm.io/gorm"
	"strings"
	"time"
)

// NewMetricDataDao builds a MetricDataDao backed by a GORM connection
// created from the given DB configuration. The split-table helper is
// seeded with the MetricData model and the configured table prefix.
func NewMetricDataDao(conf configstc.DBConfig) *MetricDataDao {
	helper := dao.SplitTableDaoHelp{
		Model:       new(models.MetricData),
		TablePrefix: conf.TablePrefix,
	}
	return &MetricDataDao{
		db:                mysqlClients.NewGormDB(conf),
		conf:              conf,
		SplitTableDaoHelp: helper,
	}
}

// MetricDataDao persists metric data points in MySQL, spreading rows
// across split tables resolved per metric id via SplitTableDaoHelp.
type MetricDataDao struct {
	db   *gorm.DB           // underlying GORM connection
	conf configstc.DBConfig // DB config used to create the connection

	dao.SplitTableDaoHelp // split-table name resolution (Model + TablePrefix)
	GormDaoHelp
	metricDaoHelp // stateless SQL builders for metric upserts
}

// Search lists metric data points matching the filter (metric id plus
// optional timestamp range), newest first, with pagination. Both
// arguments may be nil; defaults are substituted. The pager's Total is
// filled with the count of all matching rows before pagination.
func (dao *MetricDataDao) Search(search *dto.SearchMetricDataDto, pa *paginate.Pager) ([]*models.MetricData, *paginate.Pager, error) {
	if search == nil {
		search = &dto.SearchMetricDataDto{}
	}
	if pa == nil {
		pa = &paginate.Pager{}
	}
	pa.Correct()

	// Resolve the split table for this metric and start the query there.
	query := dao.db.Table(dao.SplitTable(search.MetricId, ""))

	if search.MetricId > 0 {
		query = query.Where("metric_id=?", search.MetricId)
	}
	if search.StartTime > 0 {
		query = query.Where("timestamp>=?", search.StartTime)
	}
	if search.EndTime > 0 {
		query = query.Where("timestamp<=?", search.EndTime)
	}

	var rows []*models.MetricData

	// Total is counted over the filters only, before pagination applies.
	if err := query.Count(&pa.Total).Error; err != nil {
		return rows, pa, err
	}

	// Newest first. Keyset pagination when a paging id is supplied,
	// plain offset pagination otherwise.
	query = query.Order("id desc")
	if pa.PagingId > 0 {
		query = query.Where("id<?", pa.PagingId)
	} else {
		query = query.Offset(pa.Offset())
	}

	if err := query.Limit(pa.Size).Find(&rows).Error; err != nil {
		return rows, pa, err
	}
	pa.Correct()
	return rows, pa, nil
}

// Query fetches the single data point for (metricId, timestamp).
// It returns (nil, nil) when no matching row exists.
func (dao *MetricDataDao) Query(metricId int64, timestamp int64) (*models.MetricData, error) {
	var row models.MetricData
	err := dao.db.Model(&row).
		Where("metric_id = ?", metricId).
		Where("timestamp", timestamp).
		First(&row).Error
	switch {
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, nil
	case err != nil:
		return nil, err
	}
	return &row, nil
}

// QuerySum aggregates SUM(`value`) over the rows matching the filter
// (metric id plus optional timestamp range) in the metric's split
// table. On "record not found" it returns a zero-valued result (with
// MetricId prefilled) and a nil error.
//
// NOTE(review): this issues an aggregate SELECT via First() without a
// GROUP BY; presumably the server's sql_mode permits this — confirm
// against the deployment's ONLY_FULL_GROUP_BY setting.
func (dao *MetricDataDao) QuerySum(search *dto.SearchMetricDataDto) (*models.MetricValueData, error) {
	table := dao.SplitTable(search.MetricId, "")
	db := dao.db.Table(table)

	// Prefill the metric id so the not-found fallback still carries it.
	var find models.MetricValueData
	find.MetricId = search.MetricId
	if search.MetricId > 0 {
		db = db.Where("metric_id", search.MetricId)
	}
	if search.StartTime > 0 {
		db = db.Where("timestamp>=?", search.StartTime)
	}
	if search.EndTime > 0 {
		db = db.Where("timestamp<=?", search.EndTime)
	}
	fields := []string{
		"metric_id",
		"SUM(`value`) as `value`",
	}

	logs.Out.Info("Fields= %+v,%+v,%s", fields, search, table)

	ret := db.Select(fields).First(&find)
	if errors.Is(ret.Error, gorm.ErrRecordNotFound) {
		// No rows in range: treat as an empty (zero) sum, not an error.
		return &find, nil
	}
	if ret.Error != nil {
		return &find, ret.Error
	}

	return &find, nil
}

// CleanAll removes expired rows from every metric-data split table.
// cleanDto supplies the expiry timestamp (EndTime) and the per-table
// delete cap (MaxRow). Per-table failures are skipped so one bad table
// does not abort the sweep; the total number of deleted rows is returned.
func (dao *MetricDataDao) CleanAll(cleanDto dto.CleanMetricDataDto) (int64, error) {
	tables, err := dao.FetchTables(dao.db, new(models.MetricData).SplitTableName("%"))
	if err != nil {
		return 0, err
	}

	var total int64
	for _, table := range tables {
		affected, cleanErr := dao.cleanTableDataByExpiredTimestamp(table, cleanDto.EndTime, cleanDto.MaxRow)
		if cleanErr != nil {
			// Best effort: skip tables that fail and keep sweeping.
			continue
		}
		total += affected

		// Throttle between tables to avoid DB performance spikes.
		time.Sleep(time.Millisecond * 300)
	}

	return total, nil
}

// Clean removes expired rows from the single table named in cleanDto,
// bounded by the expiry timestamp (EndTime) and max row count (MaxRow).
// It returns the number of rows deleted.
func (dao *MetricDataDao) Clean(cleanDto dto.CleanMetricDataDto) (int64, error) {
	return dao.cleanTableDataByExpiredTimestamp(cleanDto.Table, cleanDto.EndTime, cleanDto.MaxRow)
}

// cleanTableDataByExpiredTimestamp deletes rows whose `timestamp` is at
// or before expiredAt from the given table. When maxNum > 0 the delete
// is capped with LIMIT so a single call cannot remove more than maxNum
// rows. It returns the number of rows actually deleted.
//
// The table name is interpolated into the statement (identifiers cannot
// be bound as parameters); callers must only pass internally generated
// split-table names, never user input. The numeric values are bound as
// placeholders instead of being formatted into the SQL string.
func (dao *MetricDataDao) cleanTableDataByExpiredTimestamp(table string, expiredAt int64, maxNum int64) (int64, error) {
	sql := fmt.Sprintf("DELETE FROM %s WHERE `timestamp`<=?", table)
	args := []interface{}{expiredAt}
	if maxNum > 0 {
		sql += " LIMIT ?"
		args = append(args, maxNum)
	}

	ret := dao.db.Exec(sql, args...)

	return ret.RowsAffected, ret.Error

	// Manually running OPTIMIZE TABLE afterwards would reclaim disk
	// space but hurts performance, so it is intentionally disabled:
	//_, err = dao.OptimizeTable(dao.db, tb)
	//if err != nil {
	//	logs.Error("clean table=%s err %s", tb, err)
	//	return 0,err
	//}
	//return ret.RowsAffected, ret.Error
}

// InitTable creates/initializes the sub-table with the given explicit
// name using the MetricData model schema.
func (dao *MetricDataDao) InitTable(tableName string) error {
	return dao.BeInitSubTable(dao.db, tableName, new(models.MetricData))
}

// InitSplitTable creates/initializes the sub-table resolved from the
// given split key (metric id 0, key-based split) for the MetricData model.
func (dao *MetricDataDao) InitSplitTable(splitKey string) error {
	return dao.BeInitSubTable(dao.db, dao.SplitTable(0, splitKey), new(models.MetricData))
}

// Add upserts a single metric data point; it delegates to BatchAdd.
func (dao *MetricDataDao) Add(add *models.MetricData) (int64, error) {
	return dao.BatchAdd(add)
}

// BatchAdd upserts the given metric data points, routing each record to
// its metric's split table. The per-record statements are joined with
// ';' and executed in one round trip, so the DSN must allow
// multi-statements when more than one record is passed. It returns the
// number of affected rows.
func (dao *MetricDataDao) BatchAdd(adds ...*models.MetricData) (int64, error) {
	// Guard the empty batch: joining zero statements would send an
	// empty query string, which the server rejects.
	if len(adds) == 0 {
		return 0, nil
	}

	sqls := make([]string, 0, len(adds))
	for _, v := range adds {
		sqls = append(sqls, dao.SQLForWriteMetricData(dao.SplitTable(v.MetricId, ""), []models.MetricData{*v}))
	}

	ret := dao.db.Exec(strings.Join(sqls, ";"))
	return ret.RowsAffected, ret.Error
}

// Delete removes the data point identified by (metricId, timestamp)
// and returns the number of rows removed. Deleting a non-existent row
// is reported as an error.
func (dao *MetricDataDao) Delete(metricId int64, timestamp int64) (int64, error) {
	ret := dao.db.Model(new(models.MetricData)).
		Where("metric_id = ?", metricId).
		Where("timestamp", timestamp).
		Delete(&models.MetricData{})
	switch {
	case ret.Error != nil:
		return 0, ret.Error
	case ret.RowsAffected == 0:
		return 0, fmt.Errorf("no delete")
	}
	return ret.RowsAffected, nil
}

// Migrate auto-migrates the schema of the default split table (metric
// id 0, empty split key) from the configured model. The returned count
// is a fixed 1; only the error is meaningful.
func (dao *MetricDataDao) Migrate() (int64, error) {
	return 1, dao.db.Table(dao.SplitTable(0, "")).Migrator().AutoMigrate(dao.Model)
}

// DB exposes the underlying *gorm.DB connection.
func (dao *MetricDataDao) DB() *gorm.DB {
	return dao.db
}

// Use returns a copy of this DAO bound to the supplied *gorm.DB (e.g. a
// transaction handle). The configuration and embedded helpers are
// carried over so split-table resolution and SQL building keep working
// on the returned instance; previously only db was copied, which left
// SplitTableDaoHelp without its Model/TablePrefix.
func (dao *MetricDataDao) Use(db *gorm.DB) interfaces.ItfMetricDataDao {
	return &MetricDataDao{
		db:                db,
		conf:              dao.conf,
		SplitTableDaoHelp: dao.SplitTableDaoHelp,
		GormDaoHelp:       dao.GormDaoHelp,
		metricDaoHelp:     dao.metricDaoHelp,
	}
}

// metricDaoHelp groups stateless SQL-building helpers for metric data
// writes (plain data points and stat aggregates).
type metricDaoHelp struct {
}

// SQLForWriteMetricData builds an upsert INSERT statement for the given
// metric data points against `table` (falling back to the model's
// default table name when empty).
//
// In this table updated_at carries the semantic of the data point's own
// timestamp: the ON DUPLICATE KEY clause only overwrites value and
// updated_at when the incoming updated_at is not older than the stored
// one ("latest wins"), while timestamp is always refreshed.
func (h metricDaoHelp) SQLForWriteMetricData(table string, dataEntities []models.MetricData) string {
	// Escape characters that would terminate the quoted SQL literal;
	// a raw quote or backslash in Value used to break the statement.
	escape := strings.NewReplacer(`\`, `\\`, `'`, `''`)

	var values []string
	for _, entity := range dataEntities {
		// Default updated_at to the data point's own timestamp so the
		// "latest wins" comparison below has something to work with.
		if entity.UpdatedAt == nil {
			t0 := time.UnixMilli(entity.Timestamp)
			entity.UpdatedAt = &t0
		}
		values = append(values, fmt.Sprintf("('%d', '%s', '%d', '%s', '%s')",
			entity.MetricId, escape.Replace(entity.Value),
			entity.Timestamp, time.Now().Format(time.DateTime), entity.UpdatedAt.Format(time.DateTime)),
		)
	}
	if table == "" {
		table = new(models.MetricData).TableName()
	}

	onDuplicate := `ON DUPLICATE KEY UPDATE
  value = if(VALUES(updated_at)>=updated_at,VALUES(value),value),
  timestamp=VALUES(timestamp),
  updated_at=if(VALUES(updated_at)>=updated_at,VALUES(updated_at),updated_at)
`

	sql := fmt.Sprintf("INSERT INTO %s (`metric_id`,`value`,`timestamp`,`created_at`,`updated_at`) VALUES %s %s",
		table,
		strings.Join(values, ","),
		onDuplicate,
	)

	return sql
}

// SQLForWriteMetricStatData builds upsert INSERT statements for metric
// stat rows (last/min/max/avg/sum/count per stat_type+metric_id) against
// `table` (falling back to the model's default table name when empty).
//
// When merge is false, conflicting rows are simply overwritten with the
// incoming values. When merge is true, the ON DUPLICATE KEY clause folds
// the incoming aggregates into the stored ones (min/max compared, sum
// and count accumulated, avg recomputed) with 4-decimal precision.
//
// NOTE(review): entities are passed by pointer and their UpdatedAt field
// is mutated in place when zero — callers see that change.
func (h metricDaoHelp) SQLForWriteMetricStatData(table string, metricData []*models.MetricStatData, merge bool) []string {

	/*
	   	   sample SQL for testing:
	   	   INSERT INTO device_properties_data_stat
	   	     (`station_key`,`device_id`,`stat_type`,`property_key`,
	   	       `value_last`,
	   	       `value_min`,
	   	       `value_max`,
	   	       `value_avg`,
	   	       `value_sum`,
	   	       `value_count`,
	   	       `time_stamp`)
	   	     VALUES
	   	     ('station_aaa',"device_xxxx","15m","cpu","120","100","100","100","100","1",1687995244),
	   	     ('station_aaa',"device_xxxx","1d","cpu","110","110","110","110","110","1",1687995244),
	   	     ('station_aaa',"device_xxxx","15m","cpu1","120","120","120","120","120","1",1687995244),
	   	     ('station_aaa',"device_xxxx","1d","cpu2","130","130","130","130","130","1",1687995244)
	   	     ON DUPLICATE KEY UPDATE
	     	              value_last = VALUES(value_last),
	                     value_min = TRUNCATE(IF(TRUNCATE(value_min,2)>TRUNCATE(VALUES(value_min),2),VALUES(value_min),value_min),2),
	                     value_max = TRUNCATE(IF(TRUNCATE(value_max,2)<TRUNCATE(VALUES(value_max),2),VALUES(value_max),value_max),2),
	                     value_avg = TRUNCATE((value_sum+VALUES(value_sum))/(value_count+VALUES(value_count)),4),
	                     value_sum = TRUNCATE(value_sum+VALUES(value_sum),4),
	                     value_count = value_count+VALUES(value_count),
	                     time_stamp=VALUES(time_stamp);
	*/

	values := []string{}
	for _, entity := range metricData {
		// Build the VALUES tuple for this stat entity.
		if entity.UpdatedAt == 0 {
			entity.UpdatedAt = entity.Timestamp
		}
		// Numeric stat fields.
		values = append(values,
			fmt.Sprintf("('%s', '%d', '%s','%s', '%s','%s', '%s','%s', '%d', '%d', '%d')",
				entity.StatType, entity.MetricId,
				entity.ValueLast, entity.ValueMin, entity.ValueMax, entity.ValueAvg, entity.ValueSum, entity.ValueCount,
				entity.Timestamp, time.Now().UnixMilli(), entity.UpdatedAt),
		)
	}

	if table == "" {
		table = new(models.MetricStatData).TableName()
	}

	// Precision issue reproducible with the following SQL:
	//  select "32.7"+"3.27" as "a";
	// Fixed by casting:
	// select CAST("32.7" as  DECIMAL(20,6))+CAST("3.27" as DECIMAL(20,6)) as "a";

	var sqls []string
	// Nothing to write: return an empty statement list.
	if len(values) == 0 {
		return sqls
	}
	// avg keeps 4 decimal places of precision.
	onDuplicate := `ON DUPLICATE KEY UPDATE
	            value_last = VALUES(value_last),
	            value_sum = VALUES(value_sum),
	            value_min = VALUES(value_min),
	            value_max = VALUES(value_max),
	            value_count = VALUES(value_count),
	            value_avg = VALUES(value_avg),
                timestamp=VALUES(timestamp)
`
	if merge {
		onDuplicate = `ON DUPLICATE KEY UPDATE
	            value_last = VALUES(value_last),
                value_avg = TRUNCATE((value_sum+VALUES(value_sum))/(value_count+VALUES(value_count)),4),
                value_min = TRUNCATE(IF(TRUNCATE(value_min,4)>TRUNCATE(VALUES(value_min),4),VALUES(value_min),value_min),4),
                value_max = TRUNCATE(IF(TRUNCATE(value_max,4)<TRUNCATE(VALUES(value_max),4),VALUES(value_max),value_max),4),
                value_sum = TRUNCATE(value_sum+VALUES(value_sum),4),
                value_count = TRUNCATE(value_count+VALUES(value_count),4),
                timestamp=VALUES(timestamp)
`
	}
	sql := fmt.Sprintf("INSERT INTO %s (`stat_type`,`metric_id`,`value_last`,`value_min`,`value_max`,`value_avg`,`value_sum`,`value_count`,`timestamp`,`created_at`,`updated_at`) VALUES %s %s",
		table,
		strings.Join(values, ","),
		onDuplicate,
	)

	sqls = append(sqls, sql)

	return sqls
}
