package logging

import (
	"fmt"
	"gitee.com/kudingc/zh_logs/hook"
	"gitee.com/kudingc/zh_logs/library/email"
	nested "github.com/antonfisher/nested-logrus-formatter"
	rotatelogs "github.com/lestrrat-go/file-rotatelogs"
	"github.com/natefinch/lumberjack"
	log "github.com/sirupsen/logrus"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"time"
)

// Level is a log severity level; see the Panic…Trace constants below.
type Level int

// Model selects how log files are organized; see Integrated and StandAlone.
type Model int

// Format selects the log output format; see Normal, Text and Json.
type Format int

// SplitModel selects how log files are rotated; see Date and Size.
type SplitModel int

var (
	// DefaultCallerDepth is the runtime.Caller skip distance used by
	// setPrefix: the public helpers (Debug/Info/…) call setPrefix, so a
	// depth of 2 resolves to the helper's caller.
	DefaultCallerDepth = 2
	// levelFlags is indexed by Level; its order must stay aligned with the
	// Level constants below (Panic=0 … Trace=6). setOutPut also uses it as
	// the per-level file name in StandAlone mode.
	levelFlags = []string{"Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace"}
	// mapWriter holds one *log.Logger per Level, populated by setOutPut.
	mapWriter = make(map[Level]*log.Logger)
	// logAdditionalFields holds the extra fields attached to every entry;
	// set from ZhLogger.LogAdditionalFields by setAdditionalFields.
	logAdditionalFields log.Fields
	// lock serializes the log helpers' read-modify-write of
	// logAdditionalFields (the "location" key is rewritten per call).
	lock sync.Mutex
)

// Log levels, ordered from most to least severe. The numeric values are cast
// directly to logrus levels in CreateNewLogger, so the ordering mirrors
// logrus (Panic=0 … Trace=6) and must also match the levelFlags slice.
const (
	Panic Level = iota
	Fatal
	Error
	Warn
	Info
	Debug
	Trace
)

// ZhLogger 日志模块配置文件结构体
type ZhLogger struct {
	LogSavePath         string                 // 文件保存路径
	LogSaveModel        Model                  // 日志保存模式  Default：Customize：自定义文件名，StandAlone：各类型日志分开保存
	LogSaveName         string                 // 日志保存名称
	LogLevel            Level                  // 日志级别
	LogFormatter        Format                 // 日志格式
	LogSplitModel       SplitModel             // 日志分割模式 默认按时间分割
	LogSplitTime        time.Duration          // 日志分割时长，只有LogSplitModel为Date时才生效
	LogMaxSize          int                    // 日志分割大小，以MB为单位，默认大小为100M 只有LogSplitModel为Size时才生效
	LogMaxBackUp        int                    // 最大保留过期文件个数
	PrintStdout         bool                   // 是否同时将日志输出到控制台 true:输出 false:不输出（默认）
	LogAdditionalFields map[string]interface{} // 日志附加字段 该附加字段会附加到日志中输出出来
	SendToEmail         EmailConfig            // 发送日志到邮件
	SendToKafka         KafkaConfig            // 发送日志到kafka
	logFileFullPath     string
	logPrefix           string
}

// EmailConfig email配置文件
type EmailConfig struct {
	Enable       bool             // 是否启用发送日志到邮件
	Title        string           // 邮件标题 后面会自动添加日志类型
	SubscribeMap map[Level]string // Level为指定的日志类型，string为订阅的邮件地址，如果有多个邮件的话，使用分号隔开
	SenderData   EmailSenderConfig
}

// EmailSenderConfig emailSender配置文件
type EmailSenderConfig struct {
	SenderUser   string // 发送方emil地址 比如 abc@qq.com
	SenderPasswd string // 发送方密码 为开启smtp后给的秘钥
	SenderHost   string // 发送方host，比如 smtp.qq.com
	SenderPort   int    // 端口 如果为POP3/SMTP服务 则端口为25
}

// KafkaConfig kafka配置文件
type KafkaConfig struct {
	Enable bool     // 是否启用发送日志到kafka
	Broker []string // kafka服务地址
	Topic  string   // topic
}

// Save models.
const (
	Integrated Model = iota // all levels share one log file (default)
	StandAlone              // each level is saved to its own file
)

// Rotation modes.
const (
	Date SplitModel = iota // rotate log files by date (default)
	Size                   // rotate log files by size
)

// Output formats; see CreateNewLogger for the corresponding formatters.
const (
	Normal Format = iota // nested-logrus-formatter: flat, color-free output
	Text                 // logrus TextFormatter
	Json                 // logrus JSONFormatter
)

// InitConfig 修改日志组件配置，不调用此方法的话，则使用默认配置
func (zhLogger *ZhLogger) InitConfig() {
	zhLogger.init()
	zhLogger.setDefaultParams()
	zhLogger.setOutPut()
	zhLogger.setAdditionalFields()
}

// Debug logs v at Debug level with the configured additional fields and the
// caller's file:line, and returns the formatted message.
func (zhLogger ZhLogger) Debug(v ...interface{}) string {
	lock.Lock()
	defer lock.Unlock() // release even if the formatter or a hook panics
	logAdditionalFields["location"] = zhLogger.setPrefix()
	// Spread v with ... so each argument is formatted individually; passing
	// the bare slice would render it as a single "[a b c]" value.
	mapWriter[Debug].WithFields(logAdditionalFields).Debug(v...)
	return fmt.Sprint(v...)
}

// Info logs v at Info level with the configured additional fields and the
// caller's file:line, and returns the formatted message.
func (zhLogger ZhLogger) Info(v ...interface{}) string {
	lock.Lock()
	defer lock.Unlock() // release even if the formatter or a hook panics
	logAdditionalFields["location"] = zhLogger.setPrefix()
	// Spread v with ... so each argument is formatted individually; passing
	// the bare slice would render it as a single "[a b c]" value.
	mapWriter[Info].WithFields(logAdditionalFields).Info(v...)
	return fmt.Sprint(v...)
}

// Warn logs v at Warn level with the configured additional fields and the
// caller's file:line, and returns the formatted message.
func (zhLogger ZhLogger) Warn(v ...interface{}) string {
	lock.Lock()
	defer lock.Unlock() // release even if the formatter or a hook panics
	logAdditionalFields["location"] = zhLogger.setPrefix()
	// Spread v with ... so each argument is formatted individually; passing
	// the bare slice would render it as a single "[a b c]" value.
	mapWriter[Warn].WithFields(logAdditionalFields).Warn(v...)
	return fmt.Sprint(v...)
}

// Error logs v at Error level with the configured additional fields and the
// caller's file:line, and returns the formatted message.
func (zhLogger ZhLogger) Error(v ...interface{}) string {
	lock.Lock()
	defer lock.Unlock() // release even if the formatter or a hook panics
	logAdditionalFields["location"] = zhLogger.setPrefix()
	// Spread v with ... so each argument is formatted individually; passing
	// the bare slice would render it as a single "[a b c]" value.
	mapWriter[Error].WithFields(logAdditionalFields).Error(v...)
	return fmt.Sprint(v...)
}

// Fatal logs v at Fatal level with the configured additional fields and the
// caller's file:line. NOTE: logrus's Fatal exits the process after logging,
// so the return statement (and the deferred unlock) normally never execute.
func (zhLogger ZhLogger) Fatal(v ...interface{}) string {
	lock.Lock()
	defer lock.Unlock() // only reached if the Fatal path somehow returns
	logAdditionalFields["location"] = zhLogger.setPrefix()
	// Spread v with ... so each argument is formatted individually; passing
	// the bare slice would render it as a single "[a b c]" value.
	mapWriter[Fatal].WithFields(logAdditionalFields).Fatal(v...)
	return fmt.Sprint(v...)
}

// setPrefix reports the call site of the public log helper as "file.go:line".
// DefaultCallerDepth (2) skips setPrefix itself and the helper that called it.
// An empty string is returned when caller information is unavailable.
func (zhLogger ZhLogger) setPrefix() string {
	if _, file, line, ok := runtime.Caller(DefaultCallerDepth); ok {
		return fmt.Sprintf("%s:%d", filepath.Base(file), line)
	}
	return ""
}

// init wires up the optional email and kafka subscription hooks according to
// the receiver's SendToEmail and SendToKafka settings.
func (zhLogger *ZhLogger) init() {
	mailCfg := zhLogger.SendToEmail
	if mailCfg.Enable {
		sender := email.Sender{
			User:     mailCfg.SenderData.SenderUser,
			Password: mailCfg.SenderData.SenderPasswd,
			Host:     mailCfg.SenderData.SenderHost,
			Port:     mailCfg.SenderData.SenderPort,
		}
		hook.InitEmailSubscribe(mailCfg.Title, sender)
	}

	kafkaCfg := zhLogger.SendToKafka
	if kafkaCfg.Enable {
		hook.InitKafkaSubscribe(kafkaCfg.Broker, kafkaCfg.Topic)
	}
}

// setDefaultParams backfills package defaults for every configuration field
// still at its zero value.
func (zhLogger *ZhLogger) setDefaultParams() {
	if zhLogger.LogSavePath == "" {
		zhLogger.LogSavePath = logSavePath // package default directory
	}
	if zhLogger.LogSaveName == "" {
		zhLogger.LogSaveName = logSaveName // package default file name
	}
	if zhLogger.LogSplitTime == 0 {
		// Rotate weekly by default.
		zhLogger.LogSplitTime = 7 * 24 * time.Hour
	}
	if zhLogger.LogMaxBackUp == 0 {
		zhLogger.LogMaxBackUp = 7 // keep a week's worth of rotated files
	}
	if zhLogger.LogAdditionalFields == nil {
		// Guarantee the shared fields map is always usable.
		zhLogger.LogAdditionalFields = map[string]interface{}{}
	}
}

// setAdditionalFields publishes the configured extra fields to the
// package-level map that every log helper attaches via WithFields.
// NOTE: the map is shared by reference, so later mutations of
// zhLogger.LogAdditionalFields show up in subsequent log lines.
func (zhLogger *ZhLogger) setAdditionalFields() {
	logAdditionalFields = zhLogger.LogAdditionalFields
}

// 设置输出目录
func (zhLogger *ZhLogger) setOutPut() {
	switch zhLogger.LogSaveModel {
	case Integrated:
		loggerEntry := zhLogger.CreateNewLogger(-1)
		for i, _ := range levelFlags {
			mapWriter[Level(i)] = loggerEntry
		}
		break
	case StandAlone:
		for i, flag := range levelFlags {
			zhLogger.LogSaveName = flag
			mapWriter[Level(i)] = zhLogger.CreateNewLogger(Level(i))
		}
		break
	}
	return
}

// CreateNewLogger builds a *logrus.Logger from the receiver's configuration:
// log level, formatter, rotating file output, optional stdout mirroring, and
// optional email/kafka hooks.
//
// level selects which email subscriptions are attached: a concrete Level
// attaches only that level's subscription, while -1 (used by the Integrated
// save model) attaches every configured subscription.
func (zhLogger *ZhLogger) CreateNewLogger(level Level) (logger *log.Logger) {
	logger = log.New()
	// Level shares logrus's numeric ordering, so a direct cast suffices.
	logger.SetLevel(log.Level(zhLogger.LogLevel))

	switch zhLogger.LogFormatter {
	case Normal:
		logger.SetFormatter(&nested.Formatter{
			HideKeys:        true,
			TimestampFormat: "2006-01-02 15:04:05",
			ShowFullLevel:   true,
			NoColors:        true,
		})
	case Text:
		logger.SetFormatter(&log.TextFormatter{TimestampFormat: "2006-01-02 15:04:05"})
	case Json:
		logger.SetFormatter(&log.JSONFormatter{TimestampFormat: "2006-01-02 15:04:05"})
	}

	suffixPath := fmt.Sprintf("%s.%s", zhLogger.LogSaveName, logFileExt)
	fullPath := fmt.Sprintf("%s%s", zhLogger.LogSavePath, suffixPath)

	var writerFile io.Writer
	if zhLogger.LogSplitModel == Size {
		// Rotate by size via lumberjack.
		writerFile = &lumberjack.Logger{
			Filename:   fullPath,              // log file location
			MaxSize:    zhLogger.LogMaxSize,   // max size per file, in MB (lumberjack defaults to 100 when 0)
			MaxBackups: zhLogger.LogMaxBackUp, // max number of rotated files kept
			LocalTime:  false,                 // use UTC in backup file names
			Compress:   false,                 // no gzip compression of rotated files
		}
	} else {
		// Rotate by time via file-rotatelogs; the file-name pattern's
		// granularity follows the configured interval.
		var pattern string
		switch {
		case zhLogger.LogSplitTime < time.Hour:
			pattern = fullPath + ".%Y%m%d%H%M" // per-minute file names
		case zhLogger.LogSplitTime < 24*time.Hour:
			pattern = fullPath + ".%Y%m%d%H" // per-hour file names
		default:
			pattern = fullPath + ".%Y%m%d" // per-day file names
		}
		rotateWriter, err := rotatelogs.New(
			pattern,
			rotatelogs.WithLinkName(fullPath),                         // symlink pointing at the newest file
			rotatelogs.WithRotationCount(uint(zhLogger.LogMaxBackUp)), // number of files retained
			rotatelogs.WithRotationTime(zhLogger.LogSplitTime),        // rotation interval; rotatelogs clamps to >= 1 minute
			rotatelogs.WithRotationSize(100*1024*1024),                // also rotate once a file exceeds 100MB
		)
		if err != nil {
			// The original code discarded this error, leaving a nil writer
			// that would panic on the first write. Fall back to stderr so
			// logging stays functional, and report the failure.
			fmt.Fprintf(os.Stderr, "zh_logs: failed to init rotatelogs for %s: %v; falling back to stderr\n", fullPath, err)
			writerFile = os.Stderr
		} else {
			writerFile = rotateWriter
		}
	}

	if zhLogger.PrintStdout {
		logger.SetOutput(io.MultiWriter(writerFile, os.Stdout))
	} else {
		logger.SetOutput(writerFile)
	}

	if zhLogger.SendToEmail.Enable {
		for key, subscribers := range zhLogger.SendToEmail.SubscribeMap {
			// -1 (Integrated model) subscribes every configured level.
			if level == -1 || level == key {
				logger.AddHook(hook.NewEmailSubscribeHook(hook.NewEmailSubscribeMap(log.Level(key), subscribers)))
			}
		}
	}

	if zhLogger.SendToKafka.Enable {
		logger.AddHook(hook.NewKafkaHook())
	}
	return
}
