/*
 * Copyright (C) 2025 ameise <ameise.wang@gmail.com> - All Rights Reserved
 *
 * This file is part of e3log.
 *
 * e3log is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * e3log is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with e3log. If not, see <https://www.gnu.org/licenses/>.
 */

package e3log

import (
	"compress/gzip"
	"fmt"
	"github.com/fatih/color"
	"github.com/pkg/errors"
	"io"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"time"
)

// DefaultFileWriterConfig returns the configuration NewFileWriter falls back
// to when the caller supplies none: a 10 MB file rotated with one plain
// backup kept for 3 days, gzip archiving enabled with up to 2 archives kept
// for 30 days, fsync forced at error level, and mmap disabled.
func DefaultFileWriterConfig() FileWriterConfig {
	var cfg FileWriterConfig
	cfg.FileName = "" // resolved from the executable path by NewFileWriter
	cfg.SyncLevel = LevelError
	cfg.MaxSize = 10 // MB; scaled to bytes by NewFileWriter
	cfg.MaxBackups = 1
	cfg.MaxAge = 3 // days
	cfg.Compress = true
	cfg.CompressBackups = 2
	cfg.CompressAge = 30 // days
	cfg.UseMMap = false
	cfg.MMapBlock = 512 // KB; must be a multiple of 4KB
	return cfg
}

// FileWriterConfig controls the on-disk behavior of a FileWriter.
// Zero values generally disable the corresponding limit; see
// DefaultFileWriterConfig for the defaults.
type FileWriterConfig struct {
	FileName        string // log file path; empty means "<executable name>.log"
	SyncLevel       Level  // minimum level that forces an fsync after the write
	MaxSize         int64  // maximum size of each log file (MB)
	MaxBackups      int    // maximum number of plain (uncompressed) backups kept
	MaxAge          int    // maximum age in days for plain backups
	Compress        bool   // whether expired backups are gzip-archived instead of deleted
	CompressBackups int    // maximum number of compressed archives kept
	CompressAge     int    // maximum age in days for compressed archives
	UseMMap         bool   // whether to enable memory-mapped I/O
	MMapBlock       int    // mmap block size in KB; must be a multiple of 4KB
}

// NewFileWriter builds a FileWriter from the first supplied config, or from
// DefaultFileWriterConfig when none is given. An empty FileName is resolved
// to "<executable path without extension>.log". The target file is opened
// (or created) immediately and one retention-cleanup pass is run.
func NewFileWriter(cfg ...FileWriterConfig) (*FileWriter, error) {
	conf := DefaultFileWriterConfig()
	if len(cfg) > 0 {
		conf = cfg[0]
	}
	w := &FileWriter{cfg: conf}

	// MaxSize is configured in MB; everything downstream works in bytes.
	w.cfg.MaxSize *= MB

	if w.cfg.FileName == "" {
		exePath, err := os.Executable()
		if err != nil {
			return nil, err
		}
		// NOTE(review): path.Ext is meant for slash-separated paths; on
		// Windows a dotted directory name could yield a wrong extension —
		// filepath.Ext would be the safer choice. Behavior kept as-is.
		ext := path.Ext(exePath)
		w.cfg.FileName = strings.TrimSpace(strings.TrimSuffix(exePath, ext)) + ".log"
	}

	if err := w.openOrCreateFile(); err != nil {
		return nil, err
	}
	w.cleanOldLogs()
	return w, nil
}

// FileWriter writes log records to a size-rotated file on disk, optionally
// gzip-archiving rotated backups per its FileWriterConfig.
type FileWriter struct {
	cfg             FileWriterConfig // effective config (MaxSize already converted to bytes)
	file            *os.File         // current log file handle; nil when no file is open
	offset          int64            // bytes written to the current file (write position)
	index           int64            // same-timestamp backup sequence number used by backupFile
	backupTime      time.Time        // timestamp of the most recent rotation
	writeErrHandler func(error, Level, string) // optional callback for write failures; stderr is used when nil
}

// write appends one formatted record to the log file, rotating first when
// the record would push the file past MaxSize. Records at or above
// SyncLevel are fsynced immediately. The *color.Color argument is part of
// the shared writer signature and is unused for file output. Failures are
// reported through writeErrHandler when set, otherwise to stderr.
func (ts *FileWriter) write(level Level, _ *color.Color, bs []byte) {
	size := int64(len(bs))
	if ts.offset+size > ts.cfg.MaxSize {
		// Best-effort rotation; on failure we keep writing to the old file.
		_ = ts.backupFile()
	}

	// backupFile (or an earlier failure) may have left us without a file.
	if ts.file == nil {
		if err := ts.createFile(); err != nil {
			ts.reportWriteError(err, level, bs)
			return
		}
	}

	n, err := ts.file.Write(bs)
	if err != nil {
		ts.reportWriteError(err, level, bs)
		if n > 0 {
			// Roll back the partial write so the file holds only complete
			// records: restore the write position to the last known-good
			// offset, then drop the partial bytes.
			// (Bug fix: the previous code seeked *forward* by n with
			// io.SeekCurrent, leaving the position past the truncation
			// point; subsequent writes would have produced a sparse gap.)
			_, _ = ts.file.Seek(ts.offset, io.SeekStart)
			_ = ts.file.Truncate(ts.offset)
		}
		return
	}

	ts.offset += int64(n)
	if level >= ts.cfg.SyncLevel {
		_ = ts.file.Sync()
	}
}

// reportWriteError routes a write failure to the configured handler, or to
// stderr when no handler is installed.
func (ts *FileWriter) reportWriteError(err error, level Level, bs []byte) {
	if ts.writeErrHandler != nil {
		ts.writeErrHandler(errors.Wrap(err, "file write error"), level, logBytesToString(bs))
		return
	}
	_, _ = fmt.Fprintf(os.Stderr, "could not write msg: %s , err: %v\n", string(bs), err)
}

// sync flushes the current log file to stable storage. Errors are reported
// to stderr only, since there is no caller that could act on them.
func (ts *FileWriter) sync() {
	// Guard against a nil handle: write() and backupFile() set ts.file to
	// nil on their failure paths, and the previous code would panic here.
	if ts.file == nil {
		return
	}
	if err := ts.file.Sync(); err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "sync file err: %v\n", err)
	}
}

// openOrCreateFile ensures ts.file is an open handle to the configured log
// file. An existing file is opened for writing and the write position is
// moved to its end (appending); a missing or unopenable file is created
// fresh via createFile. No-op when a file is already open.
func (ts *FileWriter) openOrCreateFile() error {
	if ts.file != nil {
		return nil
	}

	fileName := ts.cfg.FileName
	info, err := os.Stat(fileName)
	switch {
	case os.IsNotExist(err):
		return ts.createFile()
	case err != nil:
		return errors.Wrap(err, "file writer open or create")
	}

	f, err := os.OpenFile(fileName, os.O_WRONLY, 0644)
	if err != nil {
		// Existing file cannot be opened for writing (e.g. permissions);
		// fall back to creating a fresh one. The open error is discarded.
		return ts.createFile()
	}
	ts.file = f
	ts.offset = info.Size()
	_, _ = f.Seek(ts.offset, io.SeekStart)
	return nil
}

// createFile creates (or truncates) the configured log file, making parent
// directories as needed, then installs it as the active file and resets the
// write offset to zero.
func (ts *FileWriter) createFile() error {
	name := ts.cfg.FileName
	if err := os.MkdirAll(filepath.Dir(name), 0744); err != nil {
		return errors.Wrap(err, "can't make dir for new logfile")
	}
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return errors.Wrap(err, "can't open new logfile")
	}
	ts.file = f
	ts.offset = 0
	return nil
}

// backupFile rotates the active log file: the open handle is closed, the
// file is renamed to a timestamped backup name produced by setBackupName,
// and a fresh log file is created in its place. Returns nil immediately
// when no file is open.
func (ts *FileWriter) backupFile() error {
	if ts.file == nil {
		return nil
	}
	fileName := ts.file.Name()
	now := time.Now()
	// NOTE(review): comparing time.Time with != also compares the monotonic
	// clock reading, so this condition is effectively always true and index
	// resets to 0 on every rotation. Presumably setBackupName re-derives a
	// free index by probing existing file names — confirm against its
	// implementation; otherwise backups within one timestamp granule could
	// collide. time.Time should normally be compared with Equal.
	if ts.backupTime != now {
		ts.backupTime = now
		ts.index = 0
	}
	var newFileName string
	newFileName, ts.index = setBackupName(fileName, now, ts.index)
	// Close before renaming: required on Windows, harmless elsewhere.
	_ = ts.file.Close()
	ts.file = nil
	err := os.Rename(fileName, newFileName)
	if err != nil {
		// The old file stays in place; the next write() will reopen or
		// recreate it via createFile.
		return err
	}
	return ts.createFile()
}

// cleanOldLogs enforces the retention policy in the log directory.
// Plain backups beyond MaxBackups or older than MaxAge days are gzip-
// archived (when Compress is set) or deleted; compressed archives beyond
// CompressBackups or older than CompressAge days are deleted. All failures
// are ignored: cleanup is best-effort and must never break logging.
func (ts *FileWriter) cleanOldLogs() {
	// NOTE(review): archive-only limits (CompressAge/CompressBackups) do not
	// pass this gate on their own — presumably intentional, since archives
	// are only produced while plain backups are rotated out; confirm.
	if !(ts.cfg.MaxAge > 0 || ts.cfg.MaxBackups > 0) {
		return
	}

	// Rotated files share the live file's directory, name prefix and
	// extension (archives additionally carry CompressSuffix).
	fileName := ts.cfg.FileName
	filePath := filepath.Dir(fileName)
	baseName := filepath.Base(fileName)
	ext := filepath.Ext(baseName)
	prefix := strings.TrimSuffix(baseName, ext)

	entries, err := os.ReadDir(filePath)
	if err != nil {
		// Cannot scan the directory; skip this cleanup pass.
		return
	}

	// logInfo describes one rotated file found on disk.
	type logInfo struct {
		fullPath  string    // directory joined with the entry name
		timestamp time.Time // rotation time parsed from the file name
		index     int64     // same-timestamp sequence number from the name
	}

	var backupLogs []logInfo          // plain backups (…ext)
	var compressLogs []logInfo        // gzip archives (…ext + CompressSuffix)
	waitLogs := make([]logInfo, 0, 8) // queued for compression or removal

	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}

		name := entry.Name()
		// Never touch the live log file itself.
		if name == baseName {
			continue
		}

		if !strings.HasPrefix(name, prefix) {
			continue
		}

		gzExt := ext + CompressSuffix
		if strings.HasSuffix(name, gzExt) {
			// NOTE(review): unlike the plain-backup branch below, archives
			// whose name fails to parse (zero timestamp) are still
			// collected; they sort oldest and are removed first — confirm
			// this is intended.
			timestamp, index := parseBackupName(name, prefix, gzExt)
			compressLogs = append(compressLogs, logInfo{
				fullPath:  filepath.Join(filePath, name),
				timestamp: timestamp,
				index:     index,
			})
		} else if strings.HasSuffix(name, ext) {
			timestamp, index := parseBackupName(name, prefix, ext)
			// A zero timestamp means the name is not a rotation product.
			if !timestamp.IsZero() {
				backupLogs = append(backupLogs, logInfo{
					fullPath:  filepath.Join(filePath, name),
					timestamp: timestamp,
					index:     index,
				})
			}
		}
	}

	// Sort newest first (ties broken by descending index): the head of the
	// slice is kept, the tail is expired.
	sort.Slice(backupLogs, func(i, j int) bool {
		if backupLogs[i].timestamp.Equal(backupLogs[j].timestamp) {
			return backupLogs[i].index > backupLogs[j].index
		}
		return backupLogs[i].timestamp.After(backupLogs[j].timestamp)
	})

	// Count-based retention: everything past MaxBackups is queued.
	if ts.cfg.MaxBackups > 0 && len(backupLogs) > ts.cfg.MaxBackups {
		waitLogs = backupLogs[ts.cfg.MaxBackups:]
		backupLogs = backupLogs[:ts.cfg.MaxBackups]
	}

	// Age-based retention: the slice is ordered newest->oldest, so
	// "timestamp before cutoff" is monotone and sort.Search yields the
	// first expired entry.
	if ts.cfg.MaxAge > 0 {
		expiredTime := time.Now().Add(-time.Duration(ts.cfg.MaxAge) * 24 * time.Hour)
		expiredIndex := sort.Search(len(backupLogs), func(i int) bool {
			return backupLogs[i].timestamp.Before(expiredTime)
		})

		if expiredIndex < len(backupLogs) {
			// Prepend the age-expired entries so waitLogs stays ordered
			// newest->oldest. Both operands alias the same backing array;
			// the destination region of this append coincides exactly with
			// the source (an element-wise self-copy), so it is safe here.
			waitLogs = append(backupLogs[expiredIndex:], waitLogs...)
			backupLogs = backupLogs[:expiredIndex]
		}
	}
	if len(waitLogs) == 0 {
		return
	}

	if ts.cfg.Compress {
		// Compressing more than CompressBackups files would be wasted work
		// (the surplus would be deleted again below), so drop the oldest now.
		if ts.cfg.CompressBackups > 0 && len(waitLogs) > ts.cfg.CompressBackups {
			removeFile := waitLogs[ts.cfg.CompressBackups:]
			for _, file := range removeFile {
				_ = os.Remove(file.fullPath)
			}
			waitLogs = waitLogs[:ts.cfg.CompressBackups]
		}
		for _, file := range waitLogs {
			oldName := file.fullPath
			newName := oldName + CompressSuffix
			// file is a per-iteration copy, so mutating fullPath before
			// appending it to compressLogs is safe. The source is removed
			// only after a successful compression.
			if err = ts.compressLog(oldName, newName); err == nil {
				file.fullPath = newName
				compressLogs = append(compressLogs, file)
				_ = os.Remove(oldName)
			}
		}
	} else {
		for _, file := range waitLogs {
			_ = os.Remove(file.fullPath)
		}
	}

	if !(ts.cfg.CompressAge > 0 || ts.cfg.CompressBackups > 0) {
		return
	}

	// Second retention pass, now over the archives (including those just
	// created above): newest first, trim by count, then by age.
	sort.Slice(compressLogs, func(i, j int) bool {
		if compressLogs[i].timestamp.Equal(compressLogs[j].timestamp) {
			return compressLogs[i].index > compressLogs[j].index
		}
		return compressLogs[i].timestamp.After(compressLogs[j].timestamp)
	})
	waitLogs = waitLogs[:0]
	if ts.cfg.CompressBackups > 0 && len(compressLogs) > ts.cfg.CompressBackups {
		waitLogs = compressLogs[ts.cfg.CompressBackups:]
		compressLogs = compressLogs[:ts.cfg.CompressBackups]
	}

	if ts.cfg.CompressAge > 0 {
		expiredTime := time.Now().Add(-time.Duration(ts.cfg.CompressAge) * 24 * time.Hour)
		expiredIndex := sort.Search(len(compressLogs), func(i int) bool {
			return compressLogs[i].timestamp.Before(expiredTime)
		})

		if expiredIndex < len(compressLogs) {
			waitLogs = append(compressLogs[expiredIndex:], waitLogs...)
			compressLogs = compressLogs[:expiredIndex]
		}
	}
	for _, file := range waitLogs {
		_ = os.Remove(file.fullPath)
	}
}

// compressLog gzips src into dst. On any failure the partially written dst
// is removed so a broken archive is never left behind. The source file is
// NOT removed here; the caller deletes it only after a successful return.
func (ts *FileWriter) compressLog(src, dst string) (err error) {
	gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer func() {
		// Close before removing: Windows cannot delete an open file (the
		// previous code removed dst while gzf was still open). A close
		// failure on the success path is also surfaced, since it can mean
		// the archive never reached disk intact.
		if cerr := gzf.Close(); err == nil {
			err = cerr
		}
		if err != nil {
			os.Remove(dst)
		}
	}()

	f, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("failed to open log file: %v", err)
	}
	defer f.Close()

	gz := gzip.NewWriter(gzf)
	if _, err = io.Copy(gz, f); err != nil {
		return err
	}
	// gzip.Writer.Close flushes the final block and trailer. Its error was
	// silently dropped by the old deferred Close, so a truncated archive
	// could be reported as success while the caller deleted the source.
	return gz.Close()
}
