package main

import (
	"bufio"
	"bytes"
	"compress/flate"
	"compress/gzip"
	"crypto/aes"
	"crypto/cipher"
	"crypto/tls"
	"encoding/csv"
	"encoding/hex"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand/v2"
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/yapingcat/gomedia/go-mp4"
	"github.com/yapingcat/gomedia/go-mpeg2"
)

// Command-line switches shared across the program; set in main.
var (
	debugId   bool // -id: print the extracted file id and exit
	skipBadTs bool // -skipbad: log and skip ts segments that fail to demux
	m3u8only  bool // -m3u8: stop after fetching the m3u8 playlist
)

// main parses the command-line flags and dispatches to either the ts→mp4
// converter (-ts2mp4 source mp4) or the m3u8 downloader (default mode).
func main() {
	flag.Usage = func() {
		fmt.Fprintf(flag.CommandLine.Output(), "%s [flags] m3u8\n\n", filepath.Base(os.Args[0]))
		flag.PrintDefaults()
	}

	var (
		ts2mp4Mode bool
		configFile string
	)
	flag.BoolVar(&ts2mp4Mode, "ts2mp4", false, "convert ts to mp4")
	flag.BoolVar(&m3u8only, "m3u8", false, "fetch m3u8 only")
	flag.BoolVar(&debugId, "id", false, "debug id")
	flag.BoolVar(&skipBadTs, "skipbad", false, "skip bad ts")
	flag.StringVar(&configFile, "c", "config.json", "configuration file")
	flag.Parse()

	switch {
	case ts2mp4Mode:
		// Local conversion mode: needs exactly a source and a destination.
		if flag.NArg() != 2 {
			fmt.Fprintf(flag.CommandLine.Output(), "%s -ts2mp4 source mp4\n", filepath.Base(os.Args[0]))
			os.Exit(1)
		}
		convertTsToMp4(flag.Arg(0), flag.Arg(1))
	case flag.NArg() != 1:
		flag.Usage()
		os.Exit(1)
	default:
		downloadM3u8(configFile, flag.Arg(0))
	}
}

// newTsMp4Muxer wires an MPEG-TS demuxer to an MP4 muxer writing to out.
// Demuxed frames are remuxed into the mp4 on the fly; tracks are registered
// lazily on the first frame of each kind. Unknown stream types are logged
// and dropped. Frame write errors are fatal (the mp4 would be corrupt).
func newTsMp4Muxer(out io.WriteSeeker) (*mp4.Movmuxer, *mpeg2.TSDemuxer, error) {
	mp4Muxer, err := mp4.CreateMp4Muxer(out)
	if err != nil {
		return nil, nil, err
	}

	var (
		vtId            uint32
		atId            uint32
		videoTrackAdded bool
		audioTrackAdded bool
	)
	// writeFrame registers the track on first use and forwards the frame;
	// label only affects the fatal error message. This replaces four
	// near-identical switch branches.
	writeFrame := func(trackId *uint32, added *bool, addTrack func() uint32, label string, frame []byte, pts, dts uint64) {
		if !*added {
			*trackId = addTrack()
			*added = true
		}
		if err := mp4Muxer.Write(*trackId, frame, pts, dts); err != nil {
			fatalf("write mp4(%s) error: %v\n", label, err)
		}
	}

	tsDemuxer := mpeg2.NewTSDemuxer()
	tsDemuxer.OnFrame = func(cid mpeg2.TS_STREAM_TYPE, frame []byte, pts, dts uint64) {
		switch cid {
		case mpeg2.TS_STREAM_H264:
			writeFrame(&vtId, &videoTrackAdded, func() uint32 { return mp4Muxer.AddVideoTrack(mp4.MP4_CODEC_H264) }, "h264", frame, pts, dts)
		case mpeg2.TS_STREAM_H265:
			writeFrame(&vtId, &videoTrackAdded, func() uint32 { return mp4Muxer.AddVideoTrack(mp4.MP4_CODEC_H265) }, "h265", frame, pts, dts)
		case mpeg2.TS_STREAM_AAC:
			writeFrame(&atId, &audioTrackAdded, func() uint32 { return mp4Muxer.AddAudioTrack(mp4.MP4_CODEC_AAC) }, "aac", frame, pts, dts)
		case mpeg2.TS_STREAM_AUDIO_MPEG1, mpeg2.TS_STREAM_AUDIO_MPEG2:
			writeFrame(&atId, &audioTrackAdded, func() uint32 { return mp4Muxer.AddAudioTrack(mp4.MP4_CODEC_MP3) }, "mp3", frame, pts, dts)
		default:
			slog.Warn("unknown stream type", "cid", cid)
		}
	}

	return mp4Muxer, tsDemuxer, nil
}

// convertTsToMp4 remuxes MPEG-TS input into the mp4 file mp4File.
// sourcePath is either a single ts file or a directory, in which case every
// *.ts file beneath it is fed to the demuxer in filepath.WalkDir (lexical)
// order. All errors are fatal, except per-file demux errors when the
// -skipbad flag is set.
func convertTsToMp4(sourcePath string, mp4File string) {
	slog.Debug("convert ts to mp4", "sourcePath", sourcePath, "mp4File", mp4File)

	// O_RDWR (not O_WRONLY): the mp4 muxer needs an io.WriteSeeker.
	out, err := os.OpenFile(mp4File, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
	if err != nil {
		fatalf("create mp4 file failed: %v\n", err)
	}
	defer out.Close()

	mp4Muxer, tsDemuxer, err := newTsMp4Muxer(out)
	if err != nil {
		fatalf("create mp4 muxer failed: %v\n", err)
	}

	// remuxFile feeds one ts file into the shared demuxer. The demuxer can
	// panic on corrupt input, so a panic is converted to an error here; with
	// -skipbad a bad file is logged and skipped instead of aborting.
	remuxFile := func(path string) (err error) {
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		defer func() {
			e := recover()
			if e != nil {
				err = fmt.Errorf("DEMUX: %v", e)
			}

			if err != nil {
				if skipBadTs {
					slog.Warn("remux", "file", path, "err", err)
					err = nil
				} else {
					err = fmt.Errorf("%w, file: %s", err, path)
				}
			}
		}()
		err = tsDemuxer.Input(f)
		return
	}

	fi, err := os.Stat(sourcePath)
	if err != nil {
		fatalf("stat ts dir failed: %v\n", err)
	}
	if fi.IsDir() {
		// Directory: remux every *.ts file, relying on WalkDir's lexical
		// order to keep segments (000000.ts, 000001.ts, ...) in sequence.
		err = filepath.WalkDir(sourcePath, func(path string, d os.DirEntry, err error) error {
			if err != nil {
				return err
			}
			if d.IsDir() {
				return nil
			}
			if !strings.HasSuffix(path, ".ts") {
				return nil
			}
			return remuxFile(path)
		})
		if err != nil {
			fatalf("process ts directory failed: %v\n", err)
		}
	} else {
		err := remuxFile(sourcePath)
		if err != nil {
			fatalf("process ts file failed: %v\n", err)
		}
	}

	// Finalize the mp4 (moov box etc.).
	err = mp4Muxer.WriteTrailer()
	if err != nil {
		fatalf("write mp4 trailer failed: %v\n", err)
	}
}

// downloadM3u8 is the main download pipeline: load the configuration,
// derive a file id from the URL, skip work that is already done, download
// every playlist entry through the configured MediaWriter, then
// convert/rename the result to <outDir>/<fileId>.mp4 and optionally clean
// up the intermediate download directory.
func downloadM3u8(configFile string, m3u8Url string) {
	config, err := loadConfig(configFile)
	if err != nil {
		fatalf("load configuration: %v\n", err)
	}

	if config.PieceStore == "" {
		config.PieceStore = "disk"
	}
	if config.LogFile != "" {
		f, err := os.OpenFile(config.LogFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
		if err != nil {
			fatalf("open log file: %v\n", err)
		}
		defer f.Close()
		log.SetOutput(f)
	}
	slog.SetLogLoggerLevel(config.LogLevel)

	fileId := extractFileId(m3u8Url, config.FileIdExtracts)

	// -id mode: only show how the URL maps to a file id, then stop.
	if debugId {
		fmt.Println(fileId)
		os.Exit(0)
	}

	slog.Debug("locate file id", "fileId", fileId)

	// If the final mp4 already exists there is nothing to download.
	mp4File := filepath.Join(config.OutDir, fileId+".mp4")

	if config.FileDB != "" {
		ok, err := checkFromDB(config.FileDB, fileId+".mp4")
		if err != nil {
			fatalf("check from db: %v\n", err)
		}
		if ok {
			slog.Info("file id already exists in db", "fileId", fileId)
			return
		}
	}

	existed, err := isFileExist(mp4File)
	if err != nil {
		fatalf("check mp4 file: %v\n", err)
	}
	if existed {
		slog.Info("mp4 file already exists", "file", mp4File)
		return
	}

	downloadDir := filepath.Join(config.OutDir, "dl-"+fileId)
	err = os.MkdirAll(downloadDir, 0755)
	if err != nil {
		fatalf("create output directory: %v\n", err)
	}

	// Choose the write strategy from the configuration.
	var (
		mediaWriter MediaWriter

		rawTsFile  string
		logFile    string
		tmpMp4File string
	)

	// Run inside a closure so the mediaWriter's (and downloader's) deferred
	// Close calls fire before the convert step below.
	func() {
		switch config.WriteMedia {
		case "direct":
			rawTsFile = filepath.Join(config.OutDir, fileId+".tmp.ts")
			mediaWriter = &DirectMediaWriter{}
		case "merge":
			rawTsFile = filepath.Join(config.OutDir, fileId+".tmp.ts")
			logFile = rawTsFile + ".log"
			mmw, err := loadMergeMediaWriter(rawTsFile, logFile, config.PieceStore)
			if err != nil {
				fatalf("load merge media writer: %v\n", err)
			}
			defer mmw.Close()
			mediaWriter = mmw
		case "mp4", "":
			tmpMp4File = filepath.Join(config.OutDir, fileId+".tmp.mp4")
			mmw, err := loadMp4MediaWriter(tmpMp4File, config.PieceStore)
			if err != nil {
				fatalf("load merge media writer: %v\n", err)
			}
			defer mmw.Close()
			mediaWriter = mmw
		default:
			fatalf("invalid write media: %s\n", config.WriteMedia)
		}

		// Download the m3u8 contents into the download directory.
		dl := newDownloader(config.Download, mediaWriter)
		defer dl.Close()

		err = dl.downloadM3u8(m3u8Url, downloadDir)
		if err != nil {
			fatalf("download m3u8: %v\n", err)
		}

		slog.Debug("download m3u8 success")
	}()

	// In direct mode the per-segment files must be merged into one ts first;
	// the ts2mp4 converter reads the directory itself and needs no merge.
	if config.WriteMedia == "direct" && config.Convert != "ts2mp4" {
		slog.Debug("merge ts", "file", rawTsFile, "dir", downloadDir)

		err := mergeTsFiles(rawTsFile, downloadDir)
		if err != nil {
			fatalf("merge ts: %v\n", err)
		}
	}

	// When writeMedia is not mp4 the convert step still has to run.
	if config.WriteMedia != "mp4" {
		switch config.Convert {
		case "":
			// No conversion: just rename the merged ts into place.
			_ = os.Rename(rawTsFile, mp4File)
			rawTsFile = ""
		case "ts2mp4":
			if config.WriteMedia == "direct" {
				convertTsToMp4(downloadDir, mp4File)
			} else if config.WriteMedia == "merge" {
				convertTsToMp4(rawTsFile, mp4File)
			} else {
				fatalf("unexpected writeMedia: %s\n", config.WriteMedia)
			}
		case "cmd":
			runConvertCommand(config.ConvertCommand, mp4File, rawTsFile)
		default:
			fatalf("invalid convert method: %s\n", config.Convert)
		}
	}

	if tmpMp4File != "" {
		_ = os.Rename(tmpMp4File, mp4File)
	}

	if config.CleanDownload {
		if rawTsFile != "" {
			_ = os.Remove(rawTsFile)
		}
		if logFile != "" {
			_ = os.Remove(logFile)
		}

		slog.Info("clean download", "dir", downloadDir)
		err := os.RemoveAll(downloadDir)
		if err != nil {
			fatalf("clean download: %v\n", err)
		}
	}
}

// fatalf prints a formatted message to stderr and terminates the process
// with exit code 1.
func fatalf(format string, args ...any) {
	msg := fmt.Sprintf(format, args...)
	os.Stderr.WriteString(msg)
	os.Exit(1)
}

// runConvertCommand expands the placeholders {wd}, {self}, {input} and
// {output} in convCmd and runs the resulting command with stdout/stderr
// attached to this process. Any failure is fatal.
//
// The substitution happens on a copy: convCmd usually aliases
// config.ConvertCommand, and the previous version mutated the caller's
// slice in place.
func runConvertCommand(convCmd []string, mp4File string, rawTsFile string) {
	if len(convCmd) == 0 {
		fatalf("run convert: empty command\n")
	}

	wd, _ := os.Getwd() // best effort; an empty {wd} is acceptable
	replacer := strings.NewReplacer(
		"{wd}", wd,
		"{self}", os.Args[0],
		"{input}", rawTsFile,
		"{output}", mp4File,
	)
	args := make([]string, len(convCmd))
	for i, s := range convCmd {
		args[i] = replacer.Replace(s)
	}

	slog.Info("run convert", "command", args)

	cmd := exec.Command(args[0], args[1:]...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err := cmd.Run()
	if err != nil {
		fatalf("run convert: %v\n", err)
	}
}

// FileIdExtract is one URL-to-file-id rule: Match is a regular expression
// applied to the m3u8 URL and Naming is a regexp expansion template
// (e.g. "$1") producing the id.
type FileIdExtract struct {
	Match  string `json:"match"`
	Naming string `json:"naming"`
}

// DownloadConfig tunes the HTTP download pipeline.
type DownloadConfig struct {
	MaxTries           int               `json:"maxTries"`           // retries per request (default 5)
	AutoClose          int               `json:"autoClose"`          // force-close connections: 0 off, >0 after that many requests, <0 randomly past -N requests
	Timeout            string            `json:"timeout"`            // http client timeout, time.ParseDuration format
	Headers            map[string]string `json:"headers"`            // extra request headers
	InsecureSkipVerify bool              `json:"insecureSkipVerify"` // skip TLS certificate verification
	Workers            int               `json:"workers"`            // parallel download workers (default 1)
	BatchSize          int               `json:"batchSize"`          // >0: finish each batch of N segments before starting the next
	MaxFails           int               `json:"maxFails"`           // abort once this many segments failed
	SyncTitle          bool              `json:"syncTitle"`          // mirror progress into the terminal title
}

// Config is the top-level layout of the JSON configuration file.
type Config struct {
	LogLevel       slog.Level       `json:"logLevel"`
	LogFile        string           `json:"logFile"`
	FileDB         string           `json:"fileDB"` // CSV listing already-downloaded file names
	FileIdExtracts []*FileIdExtract `json:"fileIdExtracts"`
	Download       *DownloadConfig  `json:"download"`
	OutDir         string           `json:"outDir"`
	PieceStore     string           `json:"pieceStore"` // out-of-order piece storage: mem/disk (default disk)
	WriteMedia     string           `json:"writeMedia"` // direct/merge/mp4 (default mp4)
	Convert        string           `json:"convert"`    // post-download conversion: ""/ts2mp4/cmd
	ConvertCommand []string         `json:"convertCommand"`
	CleanDownload  bool             `json:"cleanDownload"`
}

// loadConfig reads and unmarshals the JSON configuration file.
func loadConfig(configFile string) (*Config, error) {
	data, err := os.ReadFile(configFile)
	if err != nil {
		return nil, err
	}
	conf := &Config{}
	if err := json.Unmarshal(data, conf); err != nil {
		return nil, err
	}
	return conf, nil
}

func checkFromDB(dbFile string, name string) (bool, error) {
	f, err := os.Open(dbFile)
	if err != nil {
		return false, err
	}
	defer f.Close()
	r := csv.NewReader(f)
	r.ReuseRecord = true
	for {
		record, err := r.Read()
		if err != nil {
			if err == io.EOF {
				break
			}
			return false, err
		}
		if record[0] == name {
			return true, nil
		}
	}
	return false, nil
}

// MediaWriter abstracts where downloaded playlist entries end up: written
// as individual files (direct), journaled into one merged ts (merge), or
// remuxed straight into an mp4 (mp4).
type MediaWriter interface {
	// IsExist reports whether the entry was already stored (resume support).
	IsExist(name string) (bool, error)
	// WriteFile stores the entry; index is the playlist position, used by
	// order-sensitive writers.
	WriteFile(index int, name string, data []byte) error
	// CloneDeep returns a writer for a nested playlist.
	CloneDeep() MediaWriter
}

// DownloadJob is one unit of work for the download workers: fetch playlist
// entry media (at playlist position index) via md. wg, when non-nil, is the
// batch wait group to signal on completion.
type DownloadJob struct {
	md    *MediaDownloader
	index int
	media string
	wg    *sync.WaitGroup
}

// Downloader owns the HTTP client, the worker pool and the shared job
// counters. Nested playlists use clones (cloneDeep) that share the client,
// job channel and counters but carry their own MediaWriter clone.
type Downloader struct {
	hc          *http.Client
	config      *DownloadConfig
	mediaWriter MediaWriter
	depth       int // recursion depth for nested m3u8 playlists

	jobCh    chan *DownloadJob
	jobWg    *sync.WaitGroup
	jobTotal *atomic.Int64
	jobDone  *atomic.Int64
	jobFails *atomic.Int64

	autoCloseCounter atomic.Int64 // requests since the last forced connection close
}

// Close stops accepting jobs and waits for the workers to drain the queue.
func (dl *Downloader) Close() {
	close(dl.jobCh)
	dl.jobWg.Wait()
}

// newDownloader builds a Downloader from config and starts config.Workers
// worker goroutines consuming the shared job channel; the caller must call
// Close to stop them. MaxTries and Workers fall back to sane defaults when
// unset.
func newDownloader(config *DownloadConfig, mediaWriter MediaWriter) *Downloader {
	if config.MaxTries <= 0 {
		config.MaxTries = 5
	}
	if config.Workers <= 0 {
		config.Workers = 1
	}

	timeout, err := time.ParseDuration(config.Timeout)
	if err != nil && config.Timeout != "" {
		// An unparsable timeout used to be silently treated as "no timeout";
		// keep that fallback but surface the misconfiguration.
		slog.Warn("invalid download timeout, using none", "timeout", config.Timeout, "err", err)
	}
	jar, _ := cookiejar.New(nil) // cookiejar.New cannot fail with nil options
	dl := &Downloader{
		hc: &http.Client{
			Jar:     jar,
			Timeout: timeout,
			Transport: &http.Transport{
				Proxy: http.ProxyFromEnvironment,
				TLSClientConfig: &tls.Config{
					InsecureSkipVerify: config.InsecureSkipVerify,
				},
			},
		},
		config:      config,
		mediaWriter: mediaWriter,
		depth:       0,
		jobCh:       make(chan *DownloadJob, config.Workers),
		jobWg:       &sync.WaitGroup{},
		jobTotal:    &atomic.Int64{},
		jobDone:     &atomic.Int64{},
		jobFails:    &atomic.Int64{},
	}

	for range config.Workers {
		dl.jobWg.Add(1)
		go dl.downloadWorker()
	}

	return dl
}

// downloadWorker drains the shared job channel until it is closed, running
// each download and reporting its outcome; batch jobs also signal their
// wait group.
func (dl *Downloader) downloadWorker() {
	defer dl.jobWg.Done()
	for j := range dl.jobCh {
		res := j.md.saveMedia(j.index, j.media)
		j.md.dl.handleJobResult(j, res)
		if wg := j.wg; wg != nil {
			wg.Done()
		}
	}
}

// handleJobResult updates the done/fail counters and logs progress for one
// finished job; exceeding MaxFails aborts the whole process.
//
// NOTE(review): with MaxFails left at its zero value, the very first
// failure satisfies fails >= 0 and kills the process — confirm whether 0
// should instead mean "unlimited".
func (dl *Downloader) handleJobResult(job *DownloadJob, err error) {
	done := dl.jobDone.Add(1)
	if err != nil {
		slog.Error("download job error", "index", job.index, "media", job.media, "err", err)
		fails := dl.jobFails.Add(1)
		if fails >= int64(dl.config.MaxFails) {
			fatalf("too many fails: %d\n", fails)
		}
		return
	}

	progress := fmt.Sprintf("%d(-%d)/%d", done, dl.jobFails.Load(), dl.jobTotal.Load())
	slog.Info("job finish", "progress", progress)
	if dl.config.SyncTitle {
		// Mirror progress into the terminal title (xterm OSC 0 sequence).
		fmt.Printf("\033]0;%s\007", progress)
	}
}

// cloneDeep returns a child Downloader for a nested playlist: it shares the
// HTTP client, config, job channel and counters with the parent, but gets
// its own MediaWriter clone (its own piece ordering) and an incremented
// depth used to cap playlist recursion.
func (dl *Downloader) cloneDeep() *Downloader {
	return &Downloader{
		hc:          dl.hc,
		config:      dl.config,
		mediaWriter: dl.mediaWriter.CloneDeep(),
		depth:       dl.depth + 1,
		jobCh:       dl.jobCh,
		jobWg:       dl.jobWg,
		jobTotal:    dl.jobTotal,
		jobDone:     dl.jobDone,
		jobFails:    dl.jobFails,
	}
}

// downloadM3u8 fetches and parses the playlist at m3u8Url, sets up the
// decryptor from its EXT-X-KEY (if any), and enqueues one DownloadJob per
// media entry for the worker pool. Jobs are enqueued without waiting for
// completion — Downloader.Close performs the final wait.
func (dl *Downloader) downloadM3u8(m3u8Url string, downloadDir string) error {
	slog.Info("download m3u8", "m3u8Url", m3u8Url, "downloadDir", downloadDir)

	m3u8Path := filepath.Join(downloadDir, "index.m3u8")
	m3u8Data, err := dl.getWithCache(m3u8Url, m3u8Path)
	if err != nil {
		return err
	}

	// -m3u8 mode: the playlist has been fetched and cached; stop here.
	// NOTE(review): exits with status 1 on this success path — confirm the
	// exit code is intended.
	if m3u8only {
		os.Exit(1)
	}

	indexM3u8, err := parseM3U8(m3u8Data)
	if err != nil {
		return err
	}

	slog.Debug("parse m3u8", "duration", indexM3u8.Duration)

	parsedM3u8Url, err := url.Parse(m3u8Url)
	if err != nil {
		return err
	}
	var decryptor Decryptor
	key := indexM3u8.X["KEY"]
	if key != "" {
		slog.Debug("found key", "key", key)

		attrs, err := parseM3U8Attributes(key)
		if err != nil {
			return err
		}
		// The key bytes are fetched (and cached) relative to the playlist URL.
		decryptor, err = getDecryptor(attrs, func(uri string) ([]byte, error) {
			return dl.getWithCache(getResourceUrl(parsedM3u8Url, uri), filepath.Join(downloadDir, "enc.key"))
		})
		if err != nil {
			return err
		}
	} else {
		decryptor = &NoneDecryptor{}
	}

	md := &MediaDownloader{
		dl:        dl,
		baseUrl:   parsedM3u8Url,
		saveDir:   downloadDir,
		decryptor: decryptor,
	}

	dl.jobTotal.Add(int64(len(indexM3u8.Media)))

	var batchWait *sync.WaitGroup
	for i, media := range indexM3u8.Media {
		if dl.config.BatchSize > 0 {
			if i%dl.config.BatchSize == 0 {
				// Wait for the previous batch to finish before starting the
				// next one.
				if batchWait != nil {
					batchWait.Wait()
				}
				slog.Debug("download batch", "batch", i/dl.config.BatchSize)
				batchWait = &sync.WaitGroup{}
			}
			batchWait.Add(1)
		}
		dl.jobCh <- &DownloadJob{
			md:    md,
			index: i,
			media: media,
			wg:    batchWait,
		}
	}
	// No need to wait for the remaining jobs here; Close does that.
	return nil
}

// getWithCache returns the cached copy at savePath when it exists;
// otherwise it fetches resourceUrl over HTTP, persists the body to savePath
// and returns it.
func (dl *Downloader) getWithCache(resourceUrl string, savePath string) ([]byte, error) {
	cached, err := os.ReadFile(savePath)
	switch {
	case err == nil:
		return cached, nil
	case !os.IsNotExist(err):
		return nil, err
	}

	body, err := dl.getHttp(resourceUrl)
	if err != nil {
		return nil, err
	}
	data := body.Bytes()
	if err := os.WriteFile(savePath, data, 0644); err != nil {
		return nil, err
	}
	return data, nil
}

// shouldCloseConn decides whether the next request should force-close its
// connection, based on config.AutoClose: 0 disables the feature, a positive
// value closes after that many requests, and a negative value closes with
// ~50% probability once more than -AutoClose requests have been made.
// Every call counts one request.
func (dl *Downloader) shouldCloseConn() bool {
	cnt := dl.autoCloseCounter.Add(1)
	autoClose := dl.config.AutoClose
	if autoClose == 0 {
		return false
	}
	if autoClose > 0 {
		return cnt > int64(autoClose)
	}
	if cnt > int64(-autoClose) {
		return rand.IntN(100) > 50
	}
	return false
}

// doGetHttp performs one GET of url with the configured headers and streams
// the (transparently decompressed) body into w. Non-200 responses drain the
// body — so the connection stays reusable — and return an error.
func (dl *Downloader) doGetHttp(url string, w io.Writer) error {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return err
	}

	for k, v := range dl.config.Headers {
		req.Header.Set(k, v)
	}

	// Periodically force a fresh connection; see shouldCloseConn.
	if dl.shouldCloseConn() {
		slog.Debug("should close conn")
		req.Header.Set("Connection", "close")
		req.Close = true
		dl.autoCloseCounter.Store(0)
	}

	resp, err := dl.hc.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		io.Copy(io.Discard, resp.Body)
		return fmt.Errorf("status code: %d", resp.StatusCode)
	}

	// Decompress according to Content-Encoding. Bug fix: the previous
	// version declared "br, err :=" in the gzip branch, shadowing err, so a
	// gzip copy failure was silently dropped and the function returned nil.
	switch resp.Header.Get("Content-Encoding") {
	case "gzip":
		gr, gerr := gzip.NewReader(resp.Body)
		if gerr != nil {
			return gerr
		}
		defer gr.Close()
		_, err = io.Copy(w, gr)
	case "deflate":
		fr := flate.NewReader(resp.Body)
		defer fr.Close()
		_, err = io.Copy(w, fr)
	default:
		_, err = io.Copy(w, resp.Body)
	}
	return err
}

// getHttp fetches url into a buffer, retrying up to MaxTries times with a
// one-second pause after each failed attempt; the last error is returned
// when all attempts fail.
func (dl *Downloader) getHttp(url string) (*bytes.Buffer, error) {
	var (
		lastErr error
		body    bytes.Buffer
	)
	for attempt := 0; attempt < dl.config.MaxTries; attempt++ {
		body.Reset() // drop any partial data from the failed attempt
		lastErr = dl.doGetHttp(url, &body)
		if lastErr == nil {
			return &body, nil
		}
		slog.Warn("get http error", "url", url, "err", lastErr)
		time.Sleep(time.Second)
	}
	return nil, lastErr
}

// MediaDownloader downloads the entries of one playlist: entry URIs are
// resolved against baseUrl, payloads are decrypted with decryptor, and
// results are stored under saveDir via the Downloader's MediaWriter.
type MediaDownloader struct {
	dl        *Downloader
	baseUrl   *url.URL
	saveDir   string
	decryptor Decryptor
}

// download fetches one playlist entry. Entries ending in .m3u8 are nested
// playlists and recurse with a cloned downloader (depth-capped at 10) into
// a directory named after the entry; anything else is fetched, decrypted
// and handed to the MediaWriter.
func (md *MediaDownloader) download(index int, media string, outName string) error {
	mediaUrl := getResourceUrl(md.baseUrl, media)

	slog.Debug("download media", "index", index, "mediaUrl", mediaUrl, "out", outName)

	if strings.HasSuffix(outName, ".m3u8") {
		// Nested playlist: its segments go into "<outName minus .m3u8>/".
		dl := md.dl.cloneDeep()
		if dl.depth > 10 {
			return fmt.Errorf("too many recursive calls")
		}
		outDir := strings.TrimSuffix(outName, ".m3u8")
		err := os.MkdirAll(outDir, 0755)
		if err != nil {
			return err
		}
		return dl.downloadM3u8(mediaUrl, outDir)
	}

	tsData, err := md.dl.getHttp(mediaUrl)
	if err != nil {
		return err
	}

	data, err := md.decryptor.Decrypt(tsData.Bytes())
	if err != nil {
		return err
	}

	return md.dl.mediaWriter.WriteFile(index, outName, data)
}

// saveMedia downloads one playlist entry. The output name is the
// zero-padded playlist index plus the entry's extension (query string
// stripped first); entries whose output already exists are skipped so
// interrupted runs can resume.
func (md *MediaDownloader) saveMedia(index int, media string) error {
	slog.Debug("saveMedia", "index", index, "media", media)

	mediaPath, _, _ := strings.Cut(media, "?")
	outName := filepath.Join(md.saveDir, fmt.Sprintf("%06d%s", index, path.Ext(mediaPath)))

	existed, err := md.dl.mediaWriter.IsExist(outName)
	if err != nil {
		return err
	}
	if existed {
		slog.Debug("skip download", "media", media, "out", outName)
		return nil
	}

	return md.download(index, media, outName)
}

func getResourceUrl(baseUrl *url.URL, resourceUri string) string {
	if strings.Contains(resourceUri, "://") {
		return resourceUri
	}
	if strings.HasPrefix(resourceUri, "/") {
		return baseUrl.Scheme + "://" + baseUrl.Host + resourceUri
	} else {
		return baseUrl.Scheme + "://" + baseUrl.Host + path.Join(path.Dir(baseUrl.EscapedPath()), resourceUri)
	}
}

// M3U8 is a minimally-parsed HLS playlist: X holds the #EXT-X-* metadata
// values keyed without the prefix (e.g. "KEY", "VERSION"), Duration is the
// sum of all #EXTINF durations in seconds, and Media lists the
// segment/sub-playlist URIs in order.
type M3U8 struct {
	X        map[string]string
	Duration float64
	Media    []string
}

// parseM3U8 parses the subset of the m3u8 format this tool needs. The file
// must start with #EXTM3U; #EXT-X-* headers are collected until the first
// non-header line, then segment entries are read until #EXT-X-ENDLIST or
// EOF.
//
// Bug fix: the previous version used bufio.Reader.ReadLine and ignored its
// isPrefix result, so any line longer than the default buffer was silently
// split in two, corrupting long segment URLs. bufio.Scanner with an
// enlarged buffer handles long lines correctly (and errors on absurd ones).
func parseM3U8(data []byte) (*M3U8, error) {
	m := &M3U8{
		X: make(map[string]string),
	}
	// States: 0 = expect #EXTM3U, 1 = read #EXT-X-* metadata,
	// 2 = read media entries, 3 = done (#EXT-X-ENDLIST seen).
	state := 0
	sc := bufio.NewScanner(bytes.NewReader(data))
	sc.Buffer(make([]byte, 0, 64*1024), 4*1024*1024)
	for state < 3 && sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" {
			continue
		}
		switch state {
		case 0:
			if line != "#EXTM3U" {
				return nil, fmt.Errorf("invalid m3u8 file")
			}
			state = 1
		case 1:
			const metadataPrefix = "#EXT-X-"
			if strings.HasPrefix(line, metadataPrefix) && strings.Contains(line, ":") {
				key, value, _ := strings.Cut(line, ":")
				m.X[key[len(metadataPrefix):]] = value
				break
			}
			// First non-header line: switch to media mode and re-handle it.
			state = 2
			fallthrough
		case 2:
			if strings.HasPrefix(line, "#") {
				if strings.HasPrefix(line, "#EXTINF:") {
					duration, _, _ := strings.Cut(line[len("#EXTINF:"):], ",")
					d, err := strconv.ParseFloat(duration, 64)
					if err != nil {
						return nil, fmt.Errorf("invalid EXTINF: %s", line)
					}
					m.Duration += d
				} else if line == "#EXT-X-ENDLIST" {
					state = 3
				} else if line != "#EXT-X-DISCONTINUITY" {
					return nil, fmt.Errorf("invalid line: %s", line)
				}
			} else {
				m.Media = append(m.Media, line)
			}
		}
	}
	if err := sc.Err(); err != nil {
		return nil, err
	}
	return m, nil
}

// parseM3U8Attributes parses an HLS attribute list (RFC 8216 §4.2) of the
// form KEY=value,KEY2="quoted value",... into a map. Keys match
// [A-Za-z0-9_-]+. Quoted values may contain commas and are returned without
// the surrounding quotes — the spec defines no escape sequences, so the
// previous strconv.Unquote call both rejected valid values containing '\'
// and mis-split quoted values containing ','. Unquoted values run to the
// next comma.
func parseM3U8Attributes(value string) (map[string]any, error) {
	attrs := make(map[string]any)
	i := 0
	for i < len(value) {
		// Scan the key: one or more [A-Za-z0-9_-] characters.
		begin := i
		for i < len(value) {
			c := value[i]
			if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
				(c >= '0' && c <= '9') || c == '_' || c == '-' {
				i++
				continue
			}
			break
		}
		if begin == i {
			return nil, fmt.Errorf("invalid value: %s", value)
		}
		key := value[begin:i]

		// A trailing key without '=' is dropped (matching the previous
		// state-machine behavior); any other non-'=' character is an error.
		if i >= len(value) {
			break
		}
		if value[i] != '=' {
			return nil, fmt.Errorf("invalid value: %s", value)
		}
		i++

		// Scan the value: a quoted string runs to the closing quote and may
		// contain commas; otherwise the value runs to the next ','.
		var v string
		if i < len(value) && value[i] == '"' {
			end := strings.IndexByte(value[i+1:], '"')
			if end < 0 {
				return nil, fmt.Errorf("invalid value: %s", value)
			}
			v = value[i+1 : i+1+end]
			i += end + 2
		} else {
			begin = i
			for i < len(value) && value[i] != ',' {
				i++
			}
			v = value[begin:i]
		}
		attrs[key] = v

		// Consume the separating comma, if any.
		if i < len(value) {
			if value[i] != ',' {
				return nil, fmt.Errorf("invalid value: %s", value)
			}
			i++
		}
	}
	return attrs, nil
}

// Decryptor decrypts one downloaded media segment.
type Decryptor interface {
	Decrypt(data []byte) ([]byte, error)
}

// getDecryptor builds a Decryptor from the parsed EXT-X-KEY attributes.
// Supported METHODs are NONE (also the default when absent) and AES-128;
// for AES-128 getKey is called with the URI attribute to fetch the key
// bytes and the IV attribute is hex-decoded.
func getDecryptor(keyAttrs map[string]any, getKey func(uri string) ([]byte, error)) (Decryptor, error) {
	slog.Debug("getDecryptWrapper", "keyAttrs", keyAttrs)

	method, ok := keyAttrs["METHOD"].(string)
	if !ok {
		method = "NONE"
	}
	uri, _ := keyAttrs["URI"].(string)
	iv, _ := keyAttrs["IV"].(string)

	switch strings.ToUpper(method) {
	case "NONE":
		return &NoneDecryptor{}, nil
	case "AES-128":
		ivData, err := decodeIV(iv)
		if err != nil {
			return nil, err
		}
		keyData, err := getKey(uri)
		if err != nil {
			return nil, err
		}
		return &AesDecryptor{
			key: keyData,
			iv:  ivData,
		}, nil
	default:
		return nil, fmt.Errorf("unsupported method: %s", method)
	}
}

// decodeIV parses an EXT-X-KEY IV attribute into raw bytes. RFC 8216
// allows either a "0x" or "0X" prefix (the previous version rejected "0X").
// The result must be exactly one AES block (16 bytes) — anything else would
// make cipher.NewCBCDecrypter panic later.
func decodeIV(iv string) ([]byte, error) {
	if !strings.HasPrefix(iv, "0x") && !strings.HasPrefix(iv, "0X") {
		return nil, fmt.Errorf("invalid iv: %s", iv)
	}
	data, err := hex.DecodeString(iv[2:])
	if err != nil {
		return nil, err
	}
	if len(data) != aes.BlockSize {
		return nil, fmt.Errorf("invalid iv length: %d bytes, want %d", len(data), aes.BlockSize)
	}
	return data, nil
}

// NoneDecryptor is the pass-through Decryptor used when the playlist is not
// encrypted (METHOD=NONE or no EXT-X-KEY at all).
type NoneDecryptor struct {
}

// Decrypt returns data unchanged.
func (*NoneDecryptor) Decrypt(data []byte) ([]byte, error) {
	return data, nil
}

type AesDecryptor struct {
	key []byte
	iv  []byte
}

func (d *AesDecryptor) Decrypt(cryptedData []byte) ([]byte, error) {
	b, err := aes.NewCipher(d.key)
	if err != nil {
		return nil, err
	}
	bm := cipher.NewCBCDecrypter(b, d.iv)
	plainData := make([]byte, len(cryptedData))
	bm.CryptBlocks(plainData, cryptedData)
	return unpaddingPKCS7(plainData), nil
}

// isdigit reports whether c is an ASCII decimal digit.
func isdigit(c byte) bool {
	return '0' <= c && c <= '9'
}

// isletter reports whether c is an ASCII letter.
func isletter(c byte) bool {
	switch {
	case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z':
		return true
	default:
		return false
	}
}

// unpaddingPKCS7 removes PKCS#7 padding from data. Robustness fix:
// out-of-range padding — empty input (previously a panic), a pad byte of 0,
// or a pad longer than the data (previously a slice-bounds panic) — is now
// treated as "no padding" and data is returned unchanged.
func unpaddingPKCS7(data []byte) []byte {
	if len(data) == 0 {
		return data
	}
	pad := int(data[len(data)-1])
	if pad <= 0 || pad > len(data) {
		return data
	}
	return data[:len(data)-pad]
}

func isFileExist(fname string) (bool, error) {
	_, err := os.Stat(fname)
	if err == nil {
		return true, nil
	} else if os.IsNotExist(err) {
		return false, nil
	} else {
		return false, err
	}
}

// extractFileId derives a stable file id from the m3u8 URL by trying each
// configured match/naming pair in order; the first pattern that matches and
// expands to a non-empty name wins. When nothing matches, the current
// timestamp is used so the download still gets a unique name.
//
// Patterns come from the user's config file, so they are compiled with
// regexp.Compile and skipped with a warning on error — the previous
// regexp.MustCompile crashed the whole program on a single bad pattern.
func extractFileId(m3u8Url string, fileIdExtracts []*FileIdExtract) string {
	for _, extract := range fileIdExtracts {
		if extract.Naming == "" {
			continue
		}

		re, err := regexp.Compile(extract.Match)
		if err != nil {
			slog.Warn("invalid fileIdExtract pattern", "match", extract.Match, "err", err)
			continue
		}

		matches := re.FindStringSubmatchIndex(m3u8Url)
		if len(matches) > 0 {
			result := re.ExpandString(nil, extract.Naming, m3u8Url, matches)
			if len(result) == 0 {
				continue
			}
			return string(result)
		}
	}
	return time.Now().Format("20060102150405")
}

// DirectMediaWriter writes every downloaded entry as its own file on disk
// ("direct" writeMedia mode); segments are merged later by mergeTsFiles.
type DirectMediaWriter struct {
}

// IsExist reports whether the output file is already on disk.
func (d *DirectMediaWriter) IsExist(name string) (bool, error) {
	return isFileExist(name)
}

// WriteFile stores the entry at name; the index is unused because ordering
// is reconstructed from the zero-padded file names at merge time.
func (d *DirectMediaWriter) WriteFile(_ int, name string, data []byte) error {
	return os.WriteFile(name, data, 0644)
}

// CloneDeep returns the writer itself: it is stateless, so nested playlists
// can safely share it.
func (d *DirectMediaWriter) CloneDeep() MediaWriter {
	return d
}

// MergeMediaWriter appends ts segments in index order into a single rawTs
// file, journaling each append to a log file so an interrupted run can be
// resumed (see loadTsLog / loadMergeMediaWriter). Out-of-order pieces wait
// in pieceBuf until their turn.
type MergeMediaWriter struct {
	rawTs     *os.File
	log       *os.File
	ts        map[string]bool // segment names already committed to rawTs
	lock      *sync.Mutex
	nextIndex int // next segment index expected in rawTs
	pieceBuf  *PieceBuffer
}

// Close closes the merged ts file and its journal.
func (m *MergeMediaWriter) Close() {
	m.rawTs.Close()
	m.log.Close()
}

// IsExist reports whether a ts segment was already merged (per the
// journal); non-ts files fall back to a plain on-disk check.
func (m *MergeMediaWriter) IsExist(name string) (bool, error) {
	if strings.HasSuffix(name, ".ts") {
		return m.ts[name], nil
	}
	return isFileExist(name)
}

// writeNextPiece appends one segment to the merged ts file using a simple
// write-ahead journal: first "+<len>" is logged, then the data is written,
// then the segment name is logged to mark the write as committed. After a
// crash, loadTsLog counts only committed entries, so a torn data write can
// be truncated away. Callers must hold m.lock.
func (m *MergeMediaWriter) writeNextPiece(name string, data []byte) error {
	// Record the intended write length before touching the data file.
	_, err := m.log.WriteString("+" + strconv.Itoa(len(data)) + "\n")
	if err != nil {
		return err
	}
	_, err = m.rawTs.Write(data)
	if err != nil {
		return err
	}
	// Data write succeeded: commit by logging the segment name.
	_, err = m.log.WriteString(name + "\n")
	if err != nil {
		return err
	}
	m.ts[name] = true
	return nil
}

// writeTsData appends a segment when it is the next expected index, then
// drains any consecutive pieces parked in pieceBuf; out-of-order segments
// are buffered for later.
func (m *MergeMediaWriter) writeTsData(index int, name string, data []byte) error {
	data = trimSyncByte(data)

	m.lock.Lock()
	defer m.lock.Unlock()

	if index != m.nextIndex {
		err := m.pieceBuf.Add(index, name, data)
		if err != nil {
			return err
		}
	} else {
		err := m.writeNextPiece(name, data)
		if err != nil {
			return err
		}
		nextIndex := index + 1
		for {
			data, ok, err := m.pieceBuf.Get(nextIndex)
			if err != nil {
				return err
			}
			if !ok {
				break
			}
			// NOTE(review): this journals the drained piece under the
			// CURRENT segment's name — PieceBuffer.Get discards the name the
			// piece was stored with, so the journal/ts entries for buffered
			// pieces look wrong and resume could mis-skip or re-download
			// them. Confirm; fixing likely means Get returning the name.
			err = m.writeNextPiece(name, data)
			if err != nil {
				return err
			}
			nextIndex++
		}
		m.nextIndex = nextIndex
	}

	return nil
}

// WriteFile routes ts segments through the ordered merge path; any other
// file (nested playlists, keys) is written straight to disk.
func (m *MergeMediaWriter) WriteFile(index int, name string, data []byte) error {
	if !strings.HasSuffix(name, ".ts") {
		return os.WriteFile(name, data, 0644)
	}
	return m.writeTsData(index, name, data)
}

// CloneDeep shares the merged output file, journal, committed-segment set
// and lock with the parent, but gives the nested playlist its own piece
// ordering (fresh nextIndex and PieceBuffer) since its segment indexes
// restart at 0.
func (m *MergeMediaWriter) CloneDeep() MediaWriter {
	return &MergeMediaWriter{
		rawTs:     m.rawTs,
		log:       m.log,
		ts:        m.ts,
		lock:      m.lock,
		nextIndex: 0,
		pieceBuf:  newPieceBuffer(m.pieceBuf.store),
	}
}

// loadMergeMediaWriter opens (or resumes) a merged-ts writer. The journal
// is replayed to learn which segments are already committed and how many
// bytes of rawTsFile they cover; a data file shorter than that is an error,
// while a longer one (a torn write) is truncated back to the committed size
// before appending resumes.
//
// NOTE(review): nextIndex restarts at 0 even when resuming with committed
// segments — the resume path appears to rely on IsExist skipping them;
// confirm segments with higher indexes still get drained from pieceBuf.
func loadMergeMediaWriter(rawTsFile string, logFile string, pieceStore string) (*MergeMediaWriter, error) {
	ts, checkSize, err := loadTsLog(logFile)
	if err != nil {
		return nil, err
	}
	fi, err := os.Stat(rawTsFile)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
	} else {
		fileSize := fi.Size()
		if fileSize < checkSize {
			return nil, fmt.Errorf("file size is smaller than log: %d < %d", fileSize, checkSize)
		}
		if fileSize > checkSize {
			// Drop bytes from an uncommitted (torn) trailing write.
			err = os.Truncate(rawTsFile, checkSize)
			if err != nil {
				return nil, err
			}
		}
	}

	rawTs, err := os.OpenFile(rawTsFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	log, err := os.OpenFile(logFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		rawTs.Close()
		return nil, err
	}
	return &MergeMediaWriter{
		rawTs:     rawTs,
		log:       log,
		ts:        ts,
		lock:      &sync.Mutex{},
		nextIndex: 0,
		pieceBuf:  newPieceBuffer(pieceStore),
	}, nil
}

// Mp4MediaWriter remuxes ts segments straight into an mp4 while they
// download ("mp4" writeMedia mode). The demuxer needs its input in stream
// order, so out-of-order pieces wait in pieceBuf.
type Mp4MediaWriter struct {
	out       *os.File
	muxer     *mp4.Movmuxer
	demuxer   *mpeg2.TSDemuxer
	lock      *sync.Mutex
	nextIndex int // next segment index expected by the demuxer
	pieceBuf  *PieceBuffer
}

// Close finalizes the mp4 (writes the trailer) and closes the file.
func (m *Mp4MediaWriter) Close() {
	err := m.muxer.WriteTrailer()
	if err != nil {
		slog.Error("write mp4 trailer", "err", err)
	}
	m.out.Close()
}

// IsExist always reports false: an in-progress mp4 cannot tell which
// segments it already contains, so every segment is (re)downloaded.
func (m *Mp4MediaWriter) IsExist(name string) (bool, error) {
	return false, nil
}

// writeTsData feeds a segment to the ts demuxer when it is the next
// expected index, then drains any consecutive pieces parked in pieceBuf;
// out-of-order segments are buffered because the demuxer needs its input in
// stream order.
func (m *Mp4MediaWriter) writeTsData(index int, name string, data []byte) error {
	m.lock.Lock()
	defer m.lock.Unlock()

	if index != m.nextIndex {
		err := m.pieceBuf.Add(index, name, data)
		if err != nil {
			return err
		}
	} else {
		err := m.demuxer.Input(bytes.NewReader(data))
		if err != nil {
			return err
		}
		nextIndex := index + 1
		for {
			data, ok, err := m.pieceBuf.Get(nextIndex)
			if err != nil {
				return err
			}
			if !ok {
				break
			}
			err = m.demuxer.Input(bytes.NewReader(data))
			if err != nil {
				return err
			}
			nextIndex++
		}
		m.nextIndex = nextIndex
	}

	return nil
}

// WriteFile routes ts segments through the ordered remux path; any other
// file (nested playlists, keys) is written straight to disk.
func (m *Mp4MediaWriter) WriteFile(index int, name string, data []byte) error {
	if !strings.HasSuffix(name, ".ts") {
		return os.WriteFile(name, data, 0644)
	}
	return m.writeTsData(index, name, data)
}

// CloneDeep shares the output file, muxer, demuxer and lock with the
// parent, but gives the nested playlist its own piece ordering (fresh
// nextIndex and PieceBuffer) since its indexes restart at 0.
//
// NOTE(review): the shared demuxer sees parent and child segments in
// arrival order — confirm nested playlists are meant to be remuxed into the
// same mp4 output.
func (m *Mp4MediaWriter) CloneDeep() MediaWriter {
	return &Mp4MediaWriter{
		out:       m.out,
		muxer:     m.muxer,
		demuxer:   m.demuxer,
		lock:      m.lock,
		nextIndex: 0,
		pieceBuf:  newPieceBuffer(m.pieceBuf.store),
	}
}

// loadMp4MediaWriter creates (truncating) the temporary mp4 file and wires
// a ts→mp4 remuxing MediaWriter on top of it.
func loadMp4MediaWriter(mp4File string, pieceStore string) (*Mp4MediaWriter, error) {
	// O_RDWR (not O_WRONLY): the mp4 muxer needs an io.WriteSeeker.
	out, err := os.OpenFile(mp4File, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
	if err != nil {
		return nil, err
	}
	mp4Muxer, tsDemuxer, err := newTsMp4Muxer(out)
	if err != nil {
		out.Close()
		return nil, err
	}

	return &Mp4MediaWriter{
		out:       out,
		muxer:     mp4Muxer,
		demuxer:   tsDemuxer,
		lock:      &sync.Mutex{},
		nextIndex: 0,
		pieceBuf:  newPieceBuffer(pieceStore),
	}, nil
}

// loadTsLog replays a MergeMediaWriter journal. It returns the set of
// committed segment names and the number of merged-ts bytes they account
// for. A "+N" line records an intended write of N bytes; only when the
// following name (commit) line is present is the write counted, so a crash
// between the two leaves trailing bytes uncounted — and truncatable by
// loadMergeMediaWriter. A missing journal means a fresh start, not an
// error.
func loadTsLog(logFile string) (map[string]bool, int64, error) {
	f, err := os.Open(logFile)
	if err != nil {
		if os.IsNotExist(err) {
			// No journal yet: nothing committed.
			return make(map[string]bool), 0, nil
		}
		return nil, 0, err
	}
	defer f.Close()

	totalSize := int64(0)
	tsSize := int64(0)
	ts := make(map[string]bool)
	br := bufio.NewReader(f)
	for {
		line, _, err := br.ReadLine()
		if err != nil {
			if err != io.EOF {
				return nil, 0, err
			} else if len(line) == 0 {
				break
			}
		}
		line = bytes.TrimSpace(line)
		if len(line) == 0 {
			continue
		}
		if line[0] == '+' {
			// Length marker: remember it until the commit line arrives.
			sz, err := strconv.Atoi(string(line[1:]))
			if err != nil {
				return nil, 0, err
			}
			tsSize = int64(sz)
		} else {
			// Commit line: the preceding write is now counted.
			totalSize += tsSize
			tsSize = 0
			ts[string(line)] = true
		}
	}
	return ts, totalSize, nil
}

// PieceBuffer parks out-of-order download pieces until the writer is ready
// to consume them sequentially. Depending on store, piece data is held in
// memory ("mem") or written to disk at the piece's own path (any other
// value, normally "disk").
type PieceBuffer struct {
	store   string // "mem" or "disk"
	lock    sync.Mutex
	indexes map[int]string
	mem     map[string][]byte
}

// newPieceBuffer creates an empty PieceBuffer using the given store kind.
func newPieceBuffer(store string) *PieceBuffer {
	return &PieceBuffer{
		store:   store,
		indexes: make(map[int]string),
		mem:     make(map[string][]byte),
	}
}

// Add parks a piece under its index until Get retrieves it.
func (p *PieceBuffer) Add(index int, name string, data []byte) error {
	p.lock.Lock()
	defer p.lock.Unlock()

	if p.store == "mem" {
		p.mem[name] = data
	} else if err := os.WriteFile(name, data, 0644); err != nil {
		return err
	}
	p.indexes[index] = name
	return nil
}

// Get returns the piece parked under index, reporting ok=false when no such
// piece has been added yet. A returned piece is removed from the buffer.
func (p *PieceBuffer) Get(index int) ([]byte, bool, error) {
	p.lock.Lock()
	defer p.lock.Unlock()

	name, present := p.indexes[index]
	if !present {
		return nil, false, nil
	}

	var data []byte
	switch p.store {
	case "mem":
		memData, found := p.mem[name]
		if !found {
			return nil, false, nil
		}
		data = memData
		delete(p.mem, name)
	case "disk":
		fileData, err := os.ReadFile(name)
		if err != nil {
			return nil, false, err
		}
		data = fileData
	}
	delete(p.indexes, index)
	return data, true, nil
}

func mergeDirTs(out io.Writer, dirPath string) error {
	des, err := os.ReadDir(dirPath)
	if err != nil {
		return err
	}
	for _, de := range des {
		path := filepath.Join(dirPath, de.Name())
		if de.IsDir() {
			err := mergeDirTs(out, path)
			if err != nil {
				return err
			}
		} else {
			data, err := os.ReadFile(path)
			if err != nil {
				return err
			}
			data = trimSyncByte(data)
			_, err = out.Write(data)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func trimSyncByte(data []byte) []byte {
	// https://en.wikipedia.org/wiki/MPEG_transport_stream
	// Some TS files do not start with SyncByte 0x47, they can not be played after merging,
	// Need to remove the bytes before the SyncByte 0x47(71).
	syncByte := uint8(0x47)
	for i := 0; i < len(data); i++ {
		if data[i] == syncByte {
			return data[i:]
		}
	}
	return data
}

// mergeTsFiles concatenates every segment under downloadDir into outName
// (truncating any existing file) using mergeDirTs.
func mergeTsFiles(outName string, downloadDir string) error {
	out, err := os.OpenFile(outName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer out.Close()
	return mergeDirTs(out, downloadDir)
}
