// This is a client that writes out to a file, and optionally rolls the file

package main

import (
	"bytes"
	"compress/gzip"
	"flag"
	"fmt"
	"log"
	"os"
	"os/signal"
	"path"
	"regexp"
	"runtime/pprof"
	"strings"
	"sync"
	"syscall"
	"time"

	"golang/nsqthing/internal/app"
	"golang/nsqthing/internal/clusterinfo"
	"golang/nsqthing/internal/http_api"
	"golang/nsqthing/internal/version"

	"github.com/nsqio/go-nsq"
)

// Command-line flags.
var (
	showVersion = flag.Bool("version", false, "print version string")

	// Optional CPU profiling, written to <output-dir>/uba-join.prof.
	cpuprofile = flag.Bool("profile", false, "cpu profile? default: false")

	channel     = flag.String("channel", "nsq_to_file", "nsq channel")
	maxInFlight = flag.Int("max-in-flight", 200, "max number of messages to allow in flight")

	// Output naming and compression.
	outputDir      = flag.String("output-dir", "/tmp", "directory to write output files to")
	datetimeFormat = flag.String("datetime-format", "%Y%m%d%H", "strftime compatible format for <DATETIME> in filename format")
	filenameFormat = flag.String("filename-format", "<PUBID>.<DATETIME>.<TOPIC><REV>.log", "output filename format (<TOPIC>, <HOST>, <PID>, <DATETIME>, <REV> are replaced. <REV> is increased when file already exists)")
	pubidslice     = flag.String("pubflag", "", "which field is the pubid in the json")
	gzipLevel      = flag.Int("gzip-level", 9, "gzip compression level (1-9, 1=BestSpeed, 9=BestCompression)")
	gzipEnabled    = flag.Bool("gzip", true, "gzip output files.")
	skipEmptyFiles = flag.Bool("skip-empty-files", true, "Skip writing empty files")
	topicPollRate  = flag.Duration("topic-refresh", time.Minute, "how frequently the topic list should be refreshed")
	topicPattern   = flag.String("topic-pattern", ".*", "Only log topics matching the following pattern")

	// Rotation policy.
	// NOTE(review): rotate-size is compared against a per-file message count
	// in FileLogger.Guanli, not a byte size as the description claims —
	// confirm intent before relying on the "bytes" wording.
	rotateSize     = flag.Int64("rotate-size", 20000, "rotate the file when it grows bigger than `rotate-size` bytes")
	rotateInterval = flag.Duration("rotate-interval", 0*time.Second, "rotate the file every duration")
	// NOTE(review): rotate-interval is declared but never read anywhere in
	// this file — time-based rotation appears unimplemented.

	// Repeatable flags, registered in init().
	nsqdTCPAddrs     = app.StringArray{}
	lookupdHTTPAddrs = app.StringArray{}
	topics           = app.StringArray{}
)

// chinaLoc is the timezone used to timestamp output filenames; it is loaded
// in init().
var chinaLoc *time.Location

// Signal codes passed to FileLogger.Guanli selecting the action to perform.
const (
	FSIG_RSTOPCHAN = 0 // no-op in Guanli
	FSIG_TERCHAN   = 1 // shutdown: flush buffered data to disk
	FSIG_HUPCHAN   = 2 // no-op in Guanli
	FSIG_TICKERC   = 3 // NOTE(review): never used by Guanli or any caller here
	FSIG_LOGCHAN   = 4 // append a message body to the current file buffer
)

// init loads the timezone used for filename timestamps and registers the
// repeatable address/topic flags.
func init() {
	var err error
	chinaLoc, err = time.LoadLocation("Asia/Chongqing")
	if err != nil {
		// BUG FIX: the error was silently discarded; on hosts without a tz
		// database chinaLoc stayed nil and time.Now().In(chinaLoc) would
		// panic later in calculateCurrentFilename. Fall back to fixed UTC+8.
		chinaLoc = time.FixedZone("CST", 8*60*60)
	}
	flag.Var(&nsqdTCPAddrs, "nsqd-tcp-address", "nsqd TCP address (may be given multiple times)")
	flag.Var(&lookupdHTTPAddrs, "lookupd-http-address", "lookupd HTTP address (may be given multiple times)")
	flag.Var(&topics, "topic", "nsq topic (may be given multiple times)")
}

// FileLogger accumulates one pubID's messages in an in-memory gzip buffer
// and flushes them to revision-numbered files on disk (see Write2File).
type FileLogger struct {
	compressionLevel int    // gzip level, 1-9
	gzipEnabled      bool   // NOTE(review): never read; output is always gzipped
	filenameFormat   string // template with <DATETIME>/<REV> still unexpanded
	absFilename      string // NOTE(review): never assigned; appears unused

	// for rotation
	lastFilename string // filename the current buffer will be flushed to
	rev          uint   // <REV> counter, bumped while the target file exists

	// customize
	gzipwritter *gzip.Writer  // compresses message bodies into gbuffer
	gbuffer     *bytes.Buffer // compressed payload awaiting flush to disk
	filecount   int64         // messages written into the current buffer
}

// ConsumerFileLogger owns the nsq consumer for one topic plus one FileLogger
// per pubID observed on that topic. Guga is its router goroutine.
type ConsumerFileLogger struct {
	F  *FileLogger            // NOTE(review): never assigned; appears unused
	C  *nsq.Consumer          // consumer subscribed to this topic
	CF map[string]*FileLogger // pubID -> its file logger

	Otolc      chan *nsq.Message // one topic one log chan: handler -> router
	termChan   chan bool         // shutdown signal from TopicDiscoverer.stop
	hupChan    chan bool         // NOTE(review): signaled by hup() but never received in Guga
	ExitChan   chan int          // closed by Guga when the router loop exits
	exit       bool              // set on termChan; breaks the router loop
	topic      string            // topic name this logger is bound to
	wholecount int64             // total routed messages (30s stats log line)
}

// HandleMessage implements nsq.Handler: auto-response is disabled and the
// message is handed (blocking) to the router goroutine, which is then
// responsible for calling Finish on it.
// NOTE(review): the send on Otolc is unbuffered with no shutdown escape; if
// the Guga loop has already exited, this blocks the consumer goroutine
// forever — confirm shutdown ordering.
func (cf *ConsumerFileLogger) HandleMessage(m *nsq.Message) error {
	m.DisableAutoResponse()
	cf.Otolc <- m
	return nil
}
// Guga is the per-topic router goroutine: it drains cf.Otolc, extracts the
// publisher id from each JSON body, and dispatches the message to the
// per-pubID FileLogger (creating one on first sight). Every 30s it logs a
// throughput counter; when termChan fires it stops the consumer, flushes all
// buffered files, and closes ExitChan.
func (cf *ConsumerFileLogger) Guga() {
	fmt.Println("[Guga][Beginning][--------]", cf.topic)
	ticker := time.NewTicker(30 * time.Second)
	for {
		select {
		case <-cf.termChan:
			ticker.Stop()
			cf.C.Stop()
			// Flush every per-pubID buffer to disk before exiting.
			for k := range cf.CF {
				cf.CF[k].Guanli(FSIG_TERCHAN, nil, cf.C)
			}
			cf.exit = true
		case <-ticker.C:
			// BUG FIX: the log line is labeled "pid" but the original called
			// os.Getegid() (effective group id); report the real process id.
			log.Printf("[pid:%d][topic][%s] -> [%d]\n", os.Getpid(), cf.topic, cf.wholecount)
		case m := <-cf.Otolc:
			index := bytes.Index(m.Body, []byte(*pubidslice))
			// The pubID is assumed to start 9 bytes past the match and be 20
			// bytes long. BUG FIX: guard the slice bounds so a short or
			// truncated body cannot panic the router; such messages are
			// finished and dropped like non-matching ones.
			if index >= 0 && len(m.Body) >= index+29 {
				pubID := string(m.Body[index+9 : index+29])
				if strings.Contains(pubID, "default") {
					m.Finish()
					break
				}
				cf.wholecount++
				if v, ok := cf.CF[pubID]; ok {
					v.Guanli(FSIG_LOGCHAN, m, cf.C)
				} else {
					// NewCustomFileLogger currently never fails, so the error
					// is deliberately discarded.
					filelogger, _ := NewCustomFileLogger(*gzipEnabled, *gzipLevel, *filenameFormat, cf.topic, pubID)
					cf.CF[pubID] = filelogger
					filelogger.Guanli(FSIG_LOGCHAN, m, cf.C)
				}
			} else {
				m.Finish()
			}
		}
		if cf.exit {
			log.Println("[exit]->", cf.topic)
			close(cf.ExitChan)
			break
		}
	}
}

// TopicDiscoverer tracks one ConsumerFileLogger per topic and reacts to
// periodic lookupd topic refreshes and OS signals (see watch).
type TopicDiscoverer struct {
	topics   map[string]*ConsumerFileLogger // topic -> its consumer/router
	termChan chan os.Signal                 // receives SIGINT/SIGTERM
	hupChan  chan os.Signal                 // receives SIGHUP
	wg       sync.WaitGroup                 // tracks running topic routers
	cfg      *nsq.Config                    // shared consumer configuration
}

// newTopicDiscoverer constructs an empty discoverer bound to cfg.
func newTopicDiscoverer(cfg *nsq.Config) *TopicDiscoverer {
	return &TopicDiscoverer{
		topics: make(map[string]*ConsumerFileLogger),
		// BUG FIX: os/signal.Notify requires a buffered channel (buffer of 1
		// suffices per signal type); with the original unbuffered channels a
		// signal arriving while the watch loop was busy could be dropped.
		termChan: make(chan os.Signal, 1),
		hupChan:  make(chan os.Signal, 1),
		cfg:      cfg,
	}
}
// Write2File finalizes the in-memory gzip stream and writes it to a new file
// under <output-dir>/<pubID>/, bumping <REV> until an unused filename is
// found, then re-arms the buffer and writer for the next file.
func (f *FileLogger) Write2File() {
	// BUG FIX: on shutdown (FSIG_TERCHAN) this can run before Init was ever
	// called for this logger; the original dereferenced a nil gzip writer.
	if f.gzipwritter == nil {
		return
	}
	// Honor the previously-unused --skip-empty-files flag: if nothing was
	// buffered, skip emitting a header-only .gz file. The writer stays armed.
	if *skipEmptyFiles && f.filecount == 0 {
		return
	}
	pubID := strings.Split(f.lastFilename, ".")[0]
	spPath := path.Join(*outputDir, pubID)
	fullPath := path.Join(spPath, f.lastFilename)
	var absFilename string
	var outfile *os.File
	var err error
	// O_EXCL makes OpenFile fail when the file exists, driving the <REV> loop.
	openFlag := os.O_WRONLY | os.O_CREATE | os.O_EXCL
	if _, serr := os.Stat(spPath); os.IsNotExist(serr) {
		// BUG FIX: the MkdirAll error was ignored; fail loudly like the
		// OpenFile path below does.
		if merr := os.MkdirAll(spPath, 0777); merr != nil {
			log.Fatalf("ERROR: %s Unable to create dir %s", merr, spPath)
		}
	}
	for {
		absFilename = strings.Replace(fullPath, "<REV>", fmt.Sprintf("-%03d", f.rev), -1)
		outfile, err = os.OpenFile(absFilename, openFlag, 0666)
		if err != nil {
			if os.IsExist(err) {
				f.rev++
				continue
			}
			log.Fatalf("ERROR: %s Unable to open %s", err, absFilename)
		}
		break
	}
	// Finish the gzip stream, then persist the compressed bytes.
	if cerr := f.gzipwritter.Close(); cerr != nil {
		log.Printf("ERROR: %s closing gzip stream for %s", cerr, absFilename)
	}
	if _, werr := outfile.Write(f.gbuffer.Bytes()); werr != nil {
		log.Printf("ERROR: %s writing %s", werr, absFilename)
	}
	outfile.Sync()
	outfile.Close()
	// Re-arm buffer and writer for the next file.
	f.gbuffer.Reset()
	f.gzipwritter, err = gzip.NewWriterLevel(f.gbuffer, f.compressionLevel)
	if err != nil {
		panic("CANT CREATE GZIPWRITTER")
	}
	f.filecount = 0
}
// Init allocates a fresh in-memory buffer with a gzip writer over it and
// resets the per-file message counter. Called lazily before the first write.
func (f *FileLogger) Init() {
	f.filecount = 0
	f.gbuffer = new(bytes.Buffer)
	w, err := gzip.NewWriterLevel(f.gbuffer, f.compressionLevel)
	if err != nil {
		panic("CANT CREATE GZIPWRITTER")
	}
	f.gzipwritter = w
}
// Guanli is the single entry point for driving one FileLogger. fsig selects
// the action (see the FSIG_* constants); msg is only used for FSIG_LOGCHAN,
// and r is currently unused.
func (f *FileLogger) Guanli(fsig int, msg *nsq.Message, r *nsq.Consumer) {
	switch fsig {
	case FSIG_RSTOPCHAN:
	case FSIG_TERCHAN:
		// Shutdown: flush whatever is buffered to disk.
		f.Write2File()
	case FSIG_HUPCHAN:
	case FSIG_LOGCHAN:
		filename := f.calculateCurrentFilename()
		if f.lastFilename == "" {
			// First message for this pubID: lazily set up buffer + writer.
			f.Init()
		} else {
			// Rotate when the datetime-based filename changed or the per-file
			// counter reached the limit.
			// NOTE(review): filecount counts messages, but the --rotate-size
			// flag is documented as bytes — confirm which is intended.
			if f.lastFilename != filename || f.filecount >= *rotateSize {
				f.Write2File()
			}
		}
		// Append body + newline to the compressed buffer. Errors from writing
		// into an in-memory buffer are ignored here.
		f.gzipwritter.Write(msg.Body)
		f.gzipwritter.Write([]byte{'\n'})
		f.filecount++
		f.lastFilename = filename
		msg.Finish()
	}
}

// calculateCurrentFilename expands the <DATETIME> placeholder in the
// filename template using the current time in the China timezone.
func (f *FileLogger) calculateCurrentFilename() string {
	now := time.Now().In(chinaLoc)
	stamp := strftime(*datetimeFormat, now)
	return strings.Replace(f.filenameFormat, "<DATETIME>", stamp, -1)
}

// NewCustomFileLogger builds a FileLogger for one (topic, pubID) pair by
// expanding the <TOPIC> and <PUBID> placeholders in the filename template.
// The error result is always nil today but kept for interface stability.
//
// NOTE(review): output is gzip-compressed and suffixed ".gz" unconditionally;
// the gzipEnabled argument is recorded but nothing reads it yet, so
// --gzip=false has no effect.
func NewCustomFileLogger(gzipEnabled bool, compressionLevel int, filenameFormat, topic, pubID string) (*FileLogger, error) {
	filenameFormat = strings.Replace(filenameFormat, "<TOPIC>", topic, -1)
	filenameFormat = strings.Replace(filenameFormat, "<PUBID>", pubID, -1)
	filenameFormat = filenameFormat + ".gz"

	f := &FileLogger{
		compressionLevel: compressionLevel,
		// BUG FIX: the field existed but was never populated from the
		// argument, making the parameter dead on arrival.
		gzipEnabled:    gzipEnabled,
		filenameFormat: filenameFormat,
	}
	return f, nil
}

// newCustomConsumerFileLogger creates the consumer and routing state for one
// topic and connects it to the configured nsqds / lookupds. Errors are
// returned to the caller rather than terminating the process, so the
// periodic topic refresh in syncTopics can retry on the next poll.
func newCustomConsumerFileLogger(topic string, cfg *nsq.Config) (*ConsumerFileLogger, error) {
	consumer, err := nsq.NewConsumer(topic, *channel, cfg)
	if err != nil {
		return nil, err
	}

	cf := ConsumerFileLogger{
		C:          consumer,
		topic:      topic,
		termChan:   make(chan bool, 1),
		hupChan:    make(chan bool, 1),
		ExitChan:   make(chan int, 1),
		Otolc:      make(chan *nsq.Message),
		CF:         make(map[string]*FileLogger),
		wholecount: 0,
	}
	consumer.AddHandler(&cf)

	// BUG FIX: the original log.Fatal'd on connect errors, killing the whole
	// process even for a single failing topic discovered at runtime; the
	// function already returns an error, so use it. (main still Fatals on
	// startup failures, preserving the old behavior there.)
	if err = consumer.ConnectToNSQDs(nsqdTCPAddrs); err != nil {
		return nil, err
	}
	if err = consumer.ConnectToNSQLookupds(lookupdHTTPAddrs); err != nil {
		return nil, err
	}

	return &cf, nil
}

// startTopicRouter launches the router goroutine for one topic and blocks
// until it signals exit via ExitChan. Callers invoke this function itself
// with `go`.
// NOTE(review): because this runs inside a goroutine, wg.Add(1) races with
// wg.Wait() in watch — a termination arriving before this is scheduled can
// let Wait return early. The Add belongs at the call sites, before the
// `go` statement; cannot be fixed from within this block alone.
func (t *TopicDiscoverer) startTopicRouter(logger *ConsumerFileLogger) {
	t.wg.Add(1)
	defer t.wg.Done()
	go logger.Guga()
	<-logger.ExitChan
}

// allowTopicName reports whether a topic name matches the configured
// regular-expression pattern. An invalid pattern is treated as a non-match.
func (t *TopicDiscoverer) allowTopicName(pattern string, name string) bool {
	ok, err := regexp.MatchString(pattern, name)
	return err == nil && ok
}

// syncTopics fetches the current topic list from the lookupds and spins up a
// consumer/router pair for any pattern-matching topic not yet being handled.
func (t *TopicDiscoverer) syncTopics(addrs []string, pattern string) {
	client := http_api.NewClient(nil, 2*time.Second, 5*time.Second)
	newTopics, err := clusterinfo.New(nil, client).GetLookupdTopics(addrs)
	if err != nil {
		log.Printf("ERROR: could not retrieve topic list: %s", err)
	}
	for _, topic := range newTopics {
		if _, seen := t.topics[topic]; seen {
			continue
		}
		if !t.allowTopicName(pattern, topic) {
			log.Println("Skipping topic ", topic, "as it didn't match required pattern:", pattern)
			continue
		}
		logger, lerr := newCustomConsumerFileLogger(topic, t.cfg)
		if lerr != nil {
			log.Printf("ERROR: couldn't create logger for new topic %s: %s", topic, lerr)
			continue
		}
		t.topics[topic] = logger
		go t.startTopicRouter(logger)
	}
}

// stop signals every per-topic router to flush and shut down. The sends are
// blocking, so routers are notified one at a time.
func (t *TopicDiscoverer) stop() {
	for topic, logger := range t.topics {
		fmt.Println("[Stopping][topic]", topic)
		logger.termChan <- true
		fmt.Println("[Stopping][topic]", topic, "[finished]")
	}
}

// hup fans a SIGHUP notification out to every per-topic router.
func (t *TopicDiscoverer) hup() {
	for _, logger := range t.topics {
		logger.hupChan <- true
	}
}

// watch is the main event loop: it periodically refreshes the topic list
// (when sync is true) and reacts to SIGHUP and termination signals. It
// returns only after all routers have finished shutting down.
func (t *TopicDiscoverer) watch(addrs []string, sync bool, pattern string) {
	// BUG FIX: time.Tick leaks its underlying ticker; use NewTicker and stop
	// it when the loop returns.
	ticker := time.NewTicker(*topicPollRate)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if sync {
				t.syncTopics(addrs, pattern)
			}
		case <-t.termChan:
			fmt.Println("[Press][Ctrl][+][C]")
			t.stop()
			t.wg.Wait()
			return
		case <-t.hupChan:
			fmt.Println("[HubChan][-][-][-]")
			t.hup()
		}
	}
}

// main wires up flags, optional CPU profiling, signal handling, and one
// consumer/router per topic, then blocks in the discoverer's watch loop.
func main() {
	cfg := nsq.NewConfig()

	// TODO: remove, deprecated
	flag.Var(&nsq.ConfigFlag{cfg}, "reader-opt", "(deprecated) use --consumer-opt")
	flag.Var(&nsq.ConfigFlag{cfg}, "consumer-opt", "option to passthrough to nsq.Consumer (may be given multiple times, http://godoc.org/github.com/nsqio/go-nsq#Config)")

	flag.Parse()

	// BUG FIX: print the version and exit before any side effects; the
	// original created the profile output file even for --version runs.
	if *showVersion {
		fmt.Printf("pubid nsq_to_file v%s\n", version.Binary)
		return
	}

	if *cpuprofile {
		fmt.Println("[OPTION][PROFILE]{ok}")
		f, err := os.OpenFile(path.Join(*outputDir, "uba-join.prof"), os.O_RDWR|os.O_CREATE, 0644)
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	// BUG FIX: with an empty --pubflag the search key below degenerates to a
	// lone double quote, which matches the first quote of any JSON body and
	// makes the fixed-offset pubID slicing in Guga meaningless. Require it.
	if *pubidslice == "" {
		log.Fatal("--pubflag is required")
	}
	// Message bodies are JSON; search for the quoted field name.
	*pubidslice = `"` + *pubidslice

	if *channel == "" {
		log.Fatal("--channel is required")
	}

	var topicsFromNSQLookupd bool

	if len(nsqdTCPAddrs) == 0 && len(lookupdHTTPAddrs) == 0 {
		log.Fatal("--nsqd-tcp-address or --lookupd-http-address required.")
	}
	if len(nsqdTCPAddrs) != 0 && len(lookupdHTTPAddrs) != 0 {
		log.Fatal("use --nsqd-tcp-address or --lookupd-http-address not both")
	}

	if *gzipLevel < 1 || *gzipLevel > 9 {
		log.Fatalf("invalid --gzip-level value (%d), should be 1-9", *gzipLevel)
	}

	cfg.UserAgent = fmt.Sprintf("nsq_to_file/%s go-nsq/%s", version.Binary, nsq.VERSION)
	cfg.MaxInFlight = *maxInFlight

	discoverer := newTopicDiscoverer(cfg)

	signal.Notify(discoverer.hupChan, syscall.SIGHUP)
	signal.Notify(discoverer.termChan, syscall.SIGINT, syscall.SIGTERM)

	// With no explicit --topic flags, subscribe to every topic lookupd knows.
	if len(topics) < 1 {
		if len(lookupdHTTPAddrs) < 1 {
			log.Fatal("use --topic to list at least one topic to subscribe to or specify at least one --lookupd-http-address to subscribe to all its topics")
		}
		topicsFromNSQLookupd = true
		var err error
		topics, err = clusterinfo.New(nil, http_api.NewClient(nil, 2*time.Second, 5*time.Second)).GetLookupdTopics(lookupdHTTPAddrs)
		if err != nil {
			log.Fatalf("ERROR: could not retrieve topic list: %s", err)
		}
	}

	for _, topic := range topics {
		if !discoverer.allowTopicName(*topicPattern, topic) {
			log.Println("Skipping topic", topic, "as it didn't match required pattern:", *topicPattern)
			continue
		}

		logger, err := newCustomConsumerFileLogger(topic, cfg)
		if err != nil {
			log.Fatalf("ERROR: couldn't create logger for topic %s: %s", topic, err)
		}
		discoverer.topics[topic] = logger
		go discoverer.startTopicRouter(logger)
	}

	discoverer.watch(lookupdHTTPAddrs, topicsFromNSQLookupd, *topicPattern)
}
