package main

import (
	"bytes"
	"encoding/json"
	"os"
	"path"
	"strings"
	"sync"
	"time"

	"../logger"
	"../util"
)

// Logic owns the open per-day stores, the per-table write buffers, and
// the lifecycle channels of the background goroutine started by
// CloseDBEveryDay.
type Logic struct {
	tables  map[string]map[string]Store // date ("2006-01-02") -> table name -> open store
	mu      sync.RWMutex                // guards tables
	stop    chan string                 // created in Finalize; signals the background goroutine to exit
	done    chan string                 // background goroutine acknowledges shutdown on this channel
	buffmap map[string]MsgBuffer        // table name -> buffered Kafka messages awaiting batchPut
}

// Initial configures the logger output, opens the stores for the most
// recent *DaysToKeepOpen days, and prepares the (empty) message-buffer
// map used by buffWrite.
func (logic *Logic) Initial() {
	logger.SetOutput(*basepath+"/logs/", "crm-query", 3)
	// Open tables dated within the last *DaysToKeepOpen days.
	logic.tables = PrepareStore(*basepath, *DaysToKeepOpen)
	// A zero size hint is meaningless; plain make suffices.
	logic.buffmap = make(map[string]MsgBuffer)
}

// Finalize closes every open store, tells the background goroutine
// (see CloseDBEveryDay) to exit, waits for its acknowledgement, and
// finally closes the log file.
func (logic *Logic) Finalize() {
	var err error
	// Tear down the tables map under the write lock so we do not race
	// with the closeOldDB tick in the background goroutine.
	logic.mu.Lock()
	for date := range logic.tables {
		for name, db := range logic.tables[date] {
			err = db.Close()
			// NOTE(review): the retry callback closes the store a second
			// time — this assumes util.ErrorHandler only invokes it when
			// err is non-nil and that Close is safe to retry; confirm.
			util.ErrorHandler("close log file", err, func() error {
				return db.Close()
			})
			delete(logic.tables[date], name)
		}
		delete(logic.tables, date)
	}
	logic.mu.Unlock()
	logger.Println("Program exit, close all db")

	// Hand the goroutine its stop/done channels and wait for it to exit.
	// NOTE(review): these channels are only created here, after the
	// goroutine is already running; it can still see them because its
	// loop re-reads logic.stop on every iteration.
	logic.stop = make(chan string)
	logic.done = make(chan string)
	logic.stop <- "stop"
	<-logic.done
	// Close the log file last so the lines above are recorded.
	logger.Println("Close logfile")
	logger.Close()
}

// CloseDBEveryDay starts the background goroutine that periodically
// (a) drains messages from Kafka and (b) closes store files older than
// *DaysToKeepOpen days. It returns immediately; the goroutine runs
// until Finalize signals logic.stop.
func (logic *Logic) CloseDBEveryDay() {
	timeToCloseDB := time.Tick(time.Duration(*IntervalToCloseDB) * time.Minute)
	timeToReadKafka := time.Tick(20 * time.Second) // time.Duration(*IntervalToReadKafka) * time.Minute
	go func() {
		for {
			select {
			case <-logic.stop:
				logger.Println("Exit goroutine CloseDBEveryDay")
				logic.done <- "done"
				return
			case <-timeToReadKafka:
				logger.Println("Read message from Kafka")
				if err := logic.consumeMsg(); err != nil {
					logger.Errorf("Fail to consume message from Kafka: %v\n", err)
				}
			case <-timeToCloseDB:
				logger.Printf("Start closing db before %d days", *DaysToKeepOpen)
				logic.mu.Lock()
				if err := closeOldDB(logic.tables); err != nil {
					logger.Errorf("Fail to close db: %v", err)
				}
				logic.mu.Unlock()
			default:
				// logic.stop is (re)created by Finalize after this goroutine
				// is already running, so the loop must keep re-evaluating it
				// and cannot simply block in select. The original
				// `default: break` made this loop spin at 100% CPU; a short
				// sleep keeps the polling semantics without the busy-wait.
				time.Sleep(20 * time.Millisecond)
			}
		}
	}()
}

// closeOldDB closes and removes from tables every store whose date key
// is older than *DaysToKeepOpen days. Callers must hold the write lock
// that guards tables. If any store of a given date fails to close, the
// remaining entries for that date are kept so they can be retried on
// the next tick. Returns an error only when a date key cannot be
// parsed as "2006-01-02".
func closeOldDB(tables map[string]map[string]Store) error {
	// tables is keyed date -> dbname -> store.
	// The cutoff is loop-invariant; compute it once.
	cutoff := time.Now().AddDate(0, 0, -*DaysToKeepOpen)
	for date := range tables {
		dbdate, err := time.Parse("2006-01-02", date)
		if err != nil {
			return err
		}
		if !dbdate.Before(cutoff) {
			continue
		}
		fail := false
		for name, db := range tables[date] {
			if err := db.Close(); err != nil {
				logger.Errorf("Error occurs while closing db: data/" + date + "/" + name)
				fail = true
				break
			}
			delete(tables[date], name)
			logger.Println("Close db: data/" + date + "/" + name)
		}
		if !fail {
			// Deleting while ranging over a map is safe in Go.
			delete(tables, date)
		}
	}
	return nil
}

// OpenNGet resolves the date range from args.conds, lazily opens the
// per-day bolt store for args.table on each date (when the file exists
// on disk), and merges the query results across all dates.
//
// Returns the merged rows, or an error when the date conditions cannot
// be parsed or a store query fails.
func (logic *Logic) OpenNGet(args Args) ([]map[string]string, error) {
	var ret []map[string]string
	mindate, maxdate, err := parseDateTime(args.conds)
	if err != nil {
		return nil, err
	}
	for _, date := range getDates(mindate, maxdate) {
		logic.mu.Lock()
		store := logic.tables[date][args.table]
		if store == nil {
			tablepath := path.Join(*basepath, "data", date)
			db, err := GetBoltStore(tablepath, args.table)
			if err != nil && err != util.FileNotExit {
				logger.Errorf("Fail to open db:%v", err)
			}
			if db != nil {
				if logic.tables[date] == nil {
					logic.tables[date] = make(map[string]Store)
				}
				logic.tables[date][args.table] = db
				logger.Printf("Open db: %s/%s", date, args.table)
				store = db
			}
		}
		// Capture the store while the lock is held: the original re-read
		// logic.tables after Unlock, racing with the background goroutine
		// that mutates the map in closeOldDB.
		logic.mu.Unlock()

		if store != nil {
			result, err := store.Get(args.conds)
			if err != nil {
				return nil, err
			}
			ret = mergeResult(ret, result)
		}
	}
	return ret, nil
}

// buffWrite appends msg (newline-terminated) to the in-memory buffer
// for table db, creating the buffer on first use. When the write fails
// with bytes.ErrTooLarge or the buffer exceeds *MaxBuffSize, the
// accumulated records are parsed and flushed to boltdb via batchPut and
// the buffer is reset.
//
// Returns the buffer-write error, if any, even when the subsequent
// flush succeeded (matching the original contract).
func (logic *Logic) buffWrite(db string, msg []byte) error {
	var err error
	if logic.buffmap[db].buff == nil {
		logic.buffmap[db] = MsgBuffer{buff: &bytes.Buffer{}}
	}
	// NOTE(review): the original bare "// lock" comment suggests buffmap
	// was meant to be guarded; it is currently accessed without any
	// synchronization — confirm all callers run on the same goroutine.
	buf := logic.buffmap[db].buff
	// Write msg plus a trailing newline directly, avoiding the original
	// []byte -> string -> []byte round trip.
	_, bwerr := buf.Write(msg)
	if bwerr == nil {
		bwerr = buf.WriteByte('\n')
	}
	if bwerr == bytes.ErrTooLarge || buf.Len() > *MaxBuffSize {
		logger.Println("Try to write message from buffer to boltdb.")
		records := strings.TrimRight(buf.String(), "\n")
		res := make([]map[string]string, 0)
		for _, record := range strings.Split(records, "\n") {
			res, err = parseKafkaMsg([]byte(record), res)
			if err != nil {
				logger.Errorf("Fail to unmarshal json: %s. reason: %v", record, err)
			}
		}
		logic.batchPut(res, db)
		buf.Reset()
	}
	return bwerr
}

// batchPut validates and groups the given records by the calendar date
// in their DATETIME field, then writes each group into the bolt store
// for that date and tablename, creating the date directory and store on
// demand. Invalid records and failed writes are logged and skipped.
func (logic *Logic) batchPut(data []map[string]string, tablename string) {
	for _date, record := range groupRecordsByDate(data) {
		dateDir := path.Join(*basepath, "data", _date)
		if _, err := os.Stat(dateDir); os.IsNotExist(err) {
			os.MkdirAll(dateDir, 0755)
		}

		// Guard logic.tables: OpenNGet and the closeOldDB goroutine
		// access the same map under logic.mu.
		logic.mu.Lock()
		store := logic.tables[_date][tablename]
		if store == nil {
			var err error
			store, err = NewBoltStore(dateDir, tablename)
			if err != nil {
				logger.Errorf("Fail to open boltdb: %v", err)
				logic.mu.Unlock()
				// Fix: the original fell through here with a nil store and
				// panicked on store.BatchPut below.
				continue
			}
			if logic.tables[_date] == nil {
				logic.tables[_date] = make(map[string]Store)
			}
			logic.tables[_date][tablename] = store
		}
		logic.mu.Unlock()

		if err := store.BatchPut(record); err != nil {
			logger.Errorf("Fail to write messages to boltdb: %v", err)
		} else {
			logger.Println("Write messages successfully to boltdb.")
		}
	}
}

// groupRecordsByDate validates each record (IP, DATETIME and STAFF_ID
// must be non-empty; DATETIME must be 14 or 17 digits) and buckets the
// valid ones by calendar date ("2006-01-02"). 14-digit timestamps are
// normalized to 17 digits by appending "000" milliseconds, mutating the
// record in place as the original code did.
func groupRecordsByDate(data []map[string]string) map[string][]map[string]string {
	res := make(map[string][]map[string]string)
	for _, record := range data {
		if record["IP"] == "" || record["DATETIME"] == "" || record["STAFF_ID"] == "" {
			// Fixed typo: the original message read "IP/TETIME/STAFF_ID".
			logger.Errorf("Data lacks of field IP/DATETIME/STAFF_ID: %v\n", record)
			continue
		}
		dtLen := len(record["DATETIME"])
		if dtLen != 14 && dtLen != 17 {
			logger.Errorf("Fail to parse DATETIME: %s", record["DATETIME"])
			continue
		}
		layout := "20060102150405000"
		if dtLen == 14 {
			layout = "20060102150405"
		}
		date, err := time.Parse(layout, record["DATETIME"])
		if err != nil {
			logger.Errorf("Fail to parse DATETIME: %s", record["DATETIME"])
			continue
		}
		if dtLen == 14 {
			// Pad seconds-precision timestamps to millisecond precision.
			record["DATETIME"] = record["DATETIME"] + "000"
		}
		dateStr := date.Format("2006-01-02")
		res[dateStr] = append(res[dateStr], record)
	}
	return res
}

// parseKafkaMsg decodes a JSON array of string-keyed records from data
// and returns a fresh slice holding ori's elements followed by the
// decoded ones. On decode failure, ori is returned unchanged together
// with the unmarshal error.
func parseKafkaMsg(data []byte, ori []map[string]string) ([]map[string]string, error) {
	var cur []map[string]string
	if err := json.Unmarshal(data, &cur); err != nil {
		return ori, err
	}
	merged := make([]map[string]string, 0, len(ori)+len(cur))
	merged = append(merged, ori...)
	merged = append(merged, cur...)
	return merged, nil
}
