package file

import (
	"fmt"
	"io"
	"log"
	"os"

	"path"
	"path/filepath"
	"sort"

	"strings"
	"sync"
	"time"

	"code.google.com/p/gomonitor/tsdb"
)

// TODO(jwall): Figure out a safe file locking strategy for better
// performance.
// TODO(jwall): Compaction?
// TODO(jwall): Cache in-memory indexes to the files?
// TODO(jwall):

// metricInfo holds the per-metric write-side state: the data file
// currently being appended to, the in-memory buffer of not-yet-flushed
// datapoints, and the mutex guarding both.
type metricInfo struct {
	// file is the path of the data file currently written for this
	// metric; empty until the first flush picks one via getFile.
	file string
	// buffer accumulates datapoints until a flush writes them to file.
	buffer []Data
	// lock guards this metric's file and buffer.
	lock sync.Mutex
}

// Db is a file-backed timeseries database. Datapoints are buffered in
// memory per metric and flushed to files under dir in blocks of
// blockSize, with each file covering at most maxFileDuration of time.
type Db struct {
	// dir is the directory where timeseries data files are stored.
	dir string
	// metrics maps each metric to its write-side bookkeeping state.
	metrics map[tsdb.Metric]*metricInfo
	// blockSize is the number of datapoints per writeSeries block.
	blockSize int32
	// maxFileDuration is the maximum timespan any one file may cover.
	maxFileDuration time.Duration
}

// getDbFiles walks db.dir and collects, for each metric, the data files
// whose [fStart, fEnd] timespan satisfies cond. It returns the matching
// files sorted by start time, plus an index mapping each metric to its
// position in the metrics slice.
func (db *Db) getDbFiles(metrics []tsdb.Metric, cond func(path string, fStart, fEnd time.Time) bool) (map[tsdb.Metric]sortableFiles, map[tsdb.Metric]int) {
	metricCount := len(metrics)
	index := make(map[tsdb.Metric]int, metricCount)
	fs := make(map[tsdb.Metric]sortableFiles, metricCount)
	for i, m := range metrics {
		index[m] = i
		// filepath.Join Cleans the result and is OS-correct, unlike the
		// manual dir + "/" + prefix concatenation it replaces.
		prefix := filepath.Join(db.dir, filePrefix(m))
		files := sortableFiles{}
		filepath.Walk(db.dir, func(path string, info os.FileInfo, err error) error {
			path = filepath.Clean(path)
			if strings.HasPrefix(path, prefix) {
				fStart, fEnd, err := readTimespan(path, db.blockSize)
				if err != nil {
					// BUG FIX: the original format string had a single %q
					// verb but was passed both path and err (go vet error).
					log.Printf("Error invalid file name for data: %q: %v", path, err)
					return nil
				}
				if cond(path, fStart, fEnd) {
					files = append(files, struct {
						t    int64
						path string
					}{fStart.UnixNano(), path})
				}
			}
			return nil
		})
		sort.Sort(files)
		fs[m] = files
	}
	return fs, index
}

// GetMultipleSeries returns the datapoints for each metric in metrics
// falling within the window [end-d, end]. Results merge values read from
// on-disk series files with any still-buffered (unflushed) datapoints.
// Every requested metric's lock is held for the duration of the call.
func (db *Db) GetMultipleSeries(metrics []tsdb.Metric, end time.Time, d time.Duration) []tsdb.SeriesResponse {
	metricCount := len(metrics)
	end = end.UTC()
	// BUG FIX: UnixNano() is an int64, so the verb must be %d, not %s.
	log.Printf("UTC time %q in nanoseconds %d", end, end.UnixNano())
	start := end.Add(-d)
	startNano := start.UnixNano()
	endNano := end.UnixNano()
	// Lock every requested metric until we return, creating bookkeeping
	// entries for metrics we have not seen before.
	for _, m := range metrics {
		if _, ok := db.metrics[m]; !ok {
			db.metrics[m] = &metricInfo{}
		}
		db.metrics[m].lock.Lock()
		defer db.metrics[m].lock.Unlock()
	}
	// TODO(jwall): cache this list of files?
	// Get our list of candidate files per metric.
	fs, index := db.getDbFiles(metrics,
		func(path string, fStart, fEnd time.Time) bool {
			// Keep files whose timespan overlaps the query window.
			return timeRangeIntersects(fStart, fEnd, start, end)
		})
	vals := []tsdb.Datapoint{}
	// Read the series out of each candidate file.
	for m, files := range fs {
		i := index[m]
		for _, sf := range files {
			f, err := os.Open(sf.path)
			if err != nil {
				log.Printf("Error opening file for read %q", err)
				continue
			}
			dps, err := readSeries(f, start, d)
			// BUG FIX: the original never closed f, leaking a file
			// descriptor per candidate file per query.
			f.Close()
			if err != nil && err != io.EOF {
				log.Printf("Error reading series from file %q", err)
			}
			for _, dp := range dps {
				vals = append(vals, dataToDatapoint(dp, i, metricCount))
			}
		}
	}
	// Include datapoints still sitting in the in-memory buffers.
	for i, metric := range metrics {
		for _, dp := range db.metrics[metric].buffer {
			if dp.Ts >= startNano && dp.Ts <= endNano {
				vals = append(vals, dataToDatapoint(dp, i, metricCount))
			}
		}
	}
	return tsdb.RespFactory(vals, metricCount)
}

// isFileFull reports whether the timespan between the first and last
// datapoints stored in r meets or exceeds db.maxFileDuration. A file
// with fewer than two readable datapoints is never considered full.
func (db *Db) isFileFull(r io.ReadSeeker) (bool, error) {
	first, err := readFirst(r)
	if err != nil && err != io.EOF {
		return false, err
	}
	last, err := readLast(r, db.blockSize)
	if err != nil && err != io.EOF {
		return false, err
	}
	if first == nil || last == nil {
		// Not enough data yet to compute a span.
		return false, nil
	}
	elapsed := time.Unix(0, last.Ts).Sub(time.Unix(0, first.Ts))
	return elapsed >= db.maxFileDuration, nil
}

// add appends a datapoint for metric to its in-memory buffer and, once
// the buffer holds db.blockSize entries, flushes it to the data file
// covering the buffer's first timestamp. Callers must already hold the
// metric's lock and have created its metricInfo entry.
func (db *Db) add(metric tsdb.Metric, value float64, t time.Time) error {
	dp := Data{Value: value, Ts: t.UnixNano()}
	mi := db.metrics[metric]
	mi.buffer = append(mi.buffer, dp)
	// BUG FIX: use >= rather than ==. With == a buffer that ever
	// overshoots blockSize (e.g. after a failed flush) would never
	// trigger another flush.
	if int32(len(mi.buffer)) < db.blockSize {
		return nil
	}
	tt := time.Unix(0, mi.buffer[0].Ts)
	f, err := db.getFile(metric, mi, tt)
	if err != nil {
		log.Printf("Error getting file %q %q", mi.file, err)
		// BUG FIX: propagate the error; the original returned nil,
		// silently dropping it (and the buffered data's flush).
		return err
	}
	defer f.Close()
	log.Printf("flushing buffer for metric %v to file %q", metric, mi.file)
	writeSeries(f, mi.buffer, db.blockSize)
	// Clear the buffer but keep its capacity for reuse.
	mi.buffer = mi.buffer[:0]
	return nil
}

// Add records value for metric at time t (normalized to UTC), creating
// the metric's bookkeeping entry on first use. Safe for concurrent use
// per metric.
func (db *Db) Add(metric tsdb.Metric, value float64, t time.Time) error {
	if _, ok := db.metrics[metric]; !ok {
		db.metrics[metric] = &metricInfo{}
	}
	// BUG FIX: the original only acquired the lock inside the !ok branch,
	// so writes to an already-known metric ran completely unlocked.
	db.metrics[metric].lock.Lock()
	defer db.metrics[metric].lock.Unlock()
	return db.add(metric, value, t.UTC())
}

// New creates a Db rooted at dir that writes blockSize datapoints per
// block and caps each data file at maxDuration of coverage. Bookkeeping
// entries are pre-created for any metrics supplied up front.
func New(dir string, blockSize int32, maxDuration time.Duration, ms ...tsdb.Metric) *Db {
	db := &Db{
		dir:             dir,
		metrics:         make(map[tsdb.Metric]*metricInfo, len(ms)),
		blockSize:       blockSize,
		maxFileDuration: maxDuration,
	}
	for _, m := range ms {
		// Typo fix: "Ading" -> "Adding".
		log.Printf("Adding metric %v", m)
		db.metrics[m] = &metricInfo{}
	}
	return db
}

// Compact rewrites each metric's data files so that every file covers at
// most db.maxFileDuration of time. Each original file is renamed to
// tsdb.bak.<name> before its datapoints are re-flushed into new files.
// All metric locks are held for the duration of the compaction.
func (db *Db) Compact() {
	log.Printf("Compacting Database files")
	metrics := make([]tsdb.Metric, 0, len(db.metrics))
	for m, mi := range db.metrics {
		metrics = append(metrics, m)
		mi.lock.Lock()
		defer mi.lock.Unlock()
	}
	// Collect every data file for every metric, unconditionally.
	mfs, _ := db.getDbFiles(metrics, func(path string, fStart, fEnd time.Time) bool {
		return true
	})
	for m, mfiles := range mfs {
		var buf []Data
		for _, mf := range mfiles {
			f, err := os.Open(mf.path)
			if err != nil {
				log.Printf("Failed to open %q skipping...", mf.path)
				// TODO(jwall): move the file?
				continue
			}
			// BUG FIX: removed the original's second `if err != nil` check
			// here — err was provably nil at that point, so the "Failed to
			// read timespan" branch was dead code.
			ds, err := readAll(f)
			f.Close()
			if err != nil && err != io.EOF {
				continue
			}
			log.Printf("Processing %d datapoints from %q", len(ds), mf.path)
			// Move the original aside before rewriting its contents.
			baseName := path.Base(mf.path)
			bakName := path.Join(db.dir, "tsdb.bak."+baseName)
			if err := os.Rename(mf.path, bakName); err != nil {
				log.Fatalf("Failed to rename %q to %q...", mf.path, bakName)
			}
			for i, d := range ds {
				if len(buf) == 0 || time.Unix(0, d.Ts).Sub(time.Unix(0, buf[0].Ts)) <= db.maxFileDuration {
					buf = append(buf, d)
				} else {
					// The accumulated span exceeded maxFileDuration: flush
					// what we have and continue with the remaining points.
					// Function scope so the deferred Close fires per flush.
					func() {
						f, err := os.OpenFile(fileName(db.dir, m, time.Unix(0, buf[0].Ts)), os.O_CREATE|os.O_RDWR|os.O_APPEND, os.ModePerm)
						if err != nil {
							// BUG FIX: bail out; the original fell through
							// and called writeAll on a nil *os.File.
							log.Printf("Failed to open file %q", err)
							return
						}
						defer f.Close()
						writeAll(f, buf, db.blockSize)
						log.Printf("Flushing %d datapoints to %q", len(buf), f.Name())
						buf = ds[i:]
					}()
				}
			}
		}
		// Flush whatever datapoints are left after the last file.
		// BUG FIX: guard against an empty buffer — buf[0] below panicked
		// when a metric had no readable datapoints at all.
		if len(buf) == 0 {
			continue
		}
		f, err := os.OpenFile(fileName(db.dir, m, time.Unix(0, buf[0].Ts)), os.O_CREATE|os.O_RDWR|os.O_APPEND, os.ModePerm)
		if err != nil {
			log.Printf("Failed to open file %q", err)
			continue
		}
		log.Printf("Flushing final %d datapoints to %q", len(buf), f.Name())
		writeAll(f, buf, db.blockSize)
		// BUG FIX: the original never closed the final file.
		f.Close()
	}
}

// getFile opens (creating if necessary) the current data file for metric,
// rolling over to a fresh file named for time t once the current file
// already spans db.maxFileDuration. Callers must hold the metric's lock
// and are responsible for closing the returned file.
func (db *Db) getFile(metric tsdb.Metric, mi *metricInfo, t time.Time) (*os.File, error) {
	if mi.file == "" {
		mi.file = fileName(db.dir, metric, t)
	}
	f, err := os.OpenFile(mi.file, os.O_CREATE|os.O_RDWR|os.O_APPEND, os.ModePerm)
	if err != nil {
		return nil, fmt.Errorf("Error opening file %q %q", mi.file, err)
	}
	if full, err1 := db.isFileFull(f); full {
		f.Close()
		if err1 != nil && err1 != io.EOF {
			// BUG FIX: log err1 (the full-check error); the original
			// logged err, which is provably nil after the open above.
			log.Printf("Error checking file %q full status %q", mi.file, err1)
		}
		// Start a new file and make it current for this metric.
		mi.file = fileName(db.dir, metric, t)
		// Consistency fix: reopen via mi.file rather than reaching back
		// through db.metrics[metric].file (the same field, read twice).
		f, err = os.OpenFile(mi.file, os.O_CREATE|os.O_RDWR|os.O_APPEND, os.ModePerm)
	}
	return f, err
}

// Shutdown ensures that any stored data in memory buffers gets
// flushed to disk.
// Usually used by adding a defer db.Shutdown after a call to New.
// Shutdown ensures that any stored data in memory buffers gets
// flushed to disk.
// Usually used by adding a defer db.Shutdown after a call to New.
func (db *Db) Shutdown() {
	log.Printf("Shutting down...")
	for metric, mi := range db.metrics {
		log.Printf("cleaning up metric %v", metric)
		if len(mi.buffer) == 0 {
			continue
		}
		t := time.Unix(0, mi.buffer[0].Ts)
		f, err := db.getFile(metric, mi, t)
		if err != nil {
			log.Printf("Error getting file %q %q", mi.file, err)
			// BUG FIX: keep flushing the remaining metrics; the original
			// returned here, abandoning every buffer after the first error.
			continue
		}
		log.Printf("Flushing buffer for %v", metric)
		writeSeries(f, mi.buffer, db.blockSize)
		// BUG FIX: close now rather than defer-in-loop, which held every
		// file handle open until the whole Shutdown returned.
		f.Close()
		// Clear the buffer but keep its capacity.
		mi.buffer = mi.buffer[:0]
	}
}

// Ticker starts a goroutine that every d reads the most recently written
// datapoint for each metric and publishes the batch on the returned
// response channel. Sending on the returned stop channel terminates the
// goroutine and closes the response channel.
func (db *Db) Ticker(metrics []tsdb.Metric, d time.Duration) (chan<- bool, <-chan *tsdb.SeriesResponse) {
	for _, m := range metrics {
		if _, ok := db.metrics[m]; !ok {
			db.metrics[m] = &metricInfo{}
		}
		db.metrics[m].lock.Lock()
		defer db.metrics[m].lock.Unlock()
	}
	stopCh := make(chan bool)
	respCh := make(chan *tsdb.SeriesResponse)
	ticker := time.NewTicker(d)
	metricCount := len(metrics)
	go func() {
		defer close(respCh)
		// BUG FIX: stop the ticker when the goroutine exits so its
		// underlying timer resources are released.
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				vals := []tsdb.Datapoint{}
				for i, m := range metrics {
					func() {
						// Function scope so the deferred unlock fires per
						// metric instead of at goroutine exit.
						db.metrics[m].lock.Lock()
						defer db.metrics[m].lock.Unlock()
						if db.metrics[m].file == "" {
							return
						}
						f, err := os.Open(db.metrics[m].file)
						if err != nil {
							return
						}
						// BUG FIX: always close f; the original only closed
						// it on the success path, leaking the descriptor
						// when readLast failed with a non-EOF error.
						defer f.Close()
						dp, err := readLast(f, db.blockSize)
						if err == nil || err == io.EOF {
							vals = append(vals, dataToDatapoint(*dp, i, metricCount))
						}
					}()
				}
				for _, r := range tsdb.RespFactory(vals, metricCount) {
					// BUG FIX: copy the loop variable; &r otherwise aliases
					// the single iteration variable (pre-Go-1.22 semantics),
					// so every receiver could observe the last element.
					r := r
					respCh <- &r
				}
			case <-stopCh:
				return
			}
		}
	}()
	return stopCh, respCh
}
