package common

import (
	//"bytes"
	"container/list"
	"fmt"
	// "github.com/syndtr/goleveldb/leveldb"
	// leveldb_filter "github.com/syndtr/goleveldb/leveldb/filter"
	// leveldb_opt "github.com/syndtr/goleveldb/leveldb/opt"
	// leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
	//"io/ioutil"
	//"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"
)

// DataPointFilter reports whether a datapoint should be dropped; returning
// true means the point is filtered out and never cached.
type DataPointFilter func(dp DataPoint) bool

// SplitMetricName decomposes a dotted metric name of the form
// "<prefix>.<ip-with-underscores>.<metric...>" into the IP (underscores
// restored to dots) and the remaining metric path. Names with fewer than
// three dot-separated parts yield two empty strings.
func SplitMetricName(name string) (a, b string) {
	parts := strings.SplitN(name, ".", 3)
	if len(parts) < 3 {
		return "", ""
	}
	// The middle segment encodes an IP with "_" in place of "."; undo that.
	a = strings.Replace(parts[1], "_", ".", -1)
	b = parts[2]
	return a, b
}

// Cache stores timestamped values for a single metric.
type Cache interface {
	// Add inserts one datapoint.
	Add(dp DataPoint)
	// shrink drops the oldest time buckets so that at most size remain.
	shrink(size int)
	// QueryRange returns every stored tuple whose timestamp falls in [start, end].
	QueryRange(start, end uint32) (ret []DataTuple, err error)
	// LastDatapoint returns the most recently stored tuple (zero value when empty).
	LastDatapoint() (ret DataTuple)
	// LastUpdateTime returns the unix time (seconds) of the last Add.
	LastUpdateTime() int64
	//QueryMetricNamesByPrifix(prefix string) (ret []string, err error)
}

// CacheManager routes incoming datapoints into per-IP, per-metric caches
// and periodically shrinks them to bound memory use.
type CacheManager struct {
	Filters       []DataPointFilter // points matched by any filter are dropped on Add
	mu            sync.RWMutex      // guards caches
	unit          TimeUnit          // bucket width handed to every new MetricCache
	maxSplitCount int               // max number of time splits each metric cache may keep
	caches        map[string]map[string]Cache // ip -> metric name -> cache
	ch            chan DataPoint // ingestion channel drained by the background goroutine
}

// NewCacheManager builds a manager whose caches bucket data by the given
// time unit, keep at most splitCount buckets per metric (default 3 when 0),
// and are shrunk every shrinkInterval seconds (clamped to a minimum of 600).
// A background goroutine owns both the shrink ticker and the ingestion
// channel exposed by DataChan; it runs for the life of the process.
func NewCacheManager(unit, splitCount, shrinkInterval int) *CacheManager {
	count := splitCount
	if count == 0 {
		count = 3
	}
	timeUnit := TimeUnit(unit)
	cm := &CacheManager{
		unit:          timeUnit,
		maxSplitCount: count, //num of splits
		caches:        make(map[string]map[string]Cache),
		ch:            make(chan DataPoint),
	}
	interval := shrinkInterval
	if interval < 600 {
		interval = 600
	}
	go func() {
		// Fix: the ticker previously used the raw shrinkInterval, ignoring
		// the clamp above — time.NewTicker panics on a non-positive duration,
		// so shrinkInterval == 0 crashed the process.
		ticker := time.NewTicker(time.Duration(interval) * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				cm.shrink()
			case dp := <-cm.ch:
				cm.Add(dp)
			}
		}
	}()
	return cm
}

// AddFilter registers an additional drop filter applied to every datapoint.
// NOTE(review): Filters is appended here and iterated in Add without holding
// cm.mu — safe only if all AddFilter calls happen before ingestion starts;
// confirm with callers.
func (cm *CacheManager) AddFilter(filter DataPointFilter) {
	cm.Filters = append(cm.Filters, filter)
}

// Snapshot returns the latest datapoint of every metric, grouped by IP.
// Metrics that have never received data (zero Time) are omitted, though an
// entry for each known IP is always present.
func (cm *CacheManager) Snapshot() map[string]map[string]DataTuple {
	cm.mu.RLock()
	defer cm.mu.RUnlock()
	snapshot := make(map[string]map[string]DataTuple)
	for ip, metrics := range cm.caches {
		row := make(map[string]DataTuple)
		snapshot[ip] = row
		for name, cache := range metrics {
			last := cache.LastDatapoint()
			if last.Time != 0 {
				row[name] = last
			}
		}
	}
	return snapshot
}

// shrink asks every metric cache to drop old buckets beyond maxSplitCount.
// Only the read lock is needed here: the two-level map is not mutated, and
// each Cache serializes its own shrink internally.
func (cm *CacheManager) shrink() {
	cm.mu.RLock()
	defer cm.mu.RUnlock()
	for _, metrics := range cm.caches {
		for _, cache := range metrics {
			cache.shrink(cm.maxSplitCount)
		}
	}
}

// QueryByIP returns the name and last-update time of the most recently
// updated metric for the given IP. Both return values are zero when the IP
// is unknown or has no metrics.
func (cm *CacheManager) QueryByIP(ip string) (ret string, ts int64) {
	cm.mu.RLock()
	defer cm.mu.RUnlock()
	metrics, ok := cm.caches[ip]
	if !ok {
		return
	}
	for name, cache := range metrics {
		last := cache.LastUpdateTime()
		if last > ts {
			ret, ts = name, last
		}
	}
	return
}

// QueryRange returns the tuples stored for (ip, metric) whose timestamps
// fall in [start, end]. An unknown ip or metric yields (nil, nil), matching
// the original behavior of silently returning nothing.
func (cm *CacheManager) QueryRange(ip, metric string, start, end uint32) ([]DataTuple, error) {
	cm.mu.RLock()
	defer cm.mu.RUnlock()
	metrics, ok := cm.caches[ip]
	if !ok {
		return nil, nil
	}
	cache, ok := metrics[metric]
	if !ok {
		return nil, nil
	}
	return cache.QueryRange(start, end)
}

// QueryMetricNames returns the metric names under base (an IP) that contain
// at least one of the given substrings. It reports "no match" when the base
// is unknown or has no metrics.
//
// Fixes: the previous version took the exclusive write lock for this
// read-only scan (every other reader uses RLock), looked the same key up in
// cm.caches three times, and appended a name once per matching pattern,
// producing duplicate entries when several patterns matched.
func (cm *CacheManager) QueryMetricNames(base string, matches []string) (ret []string, err error) {
	cm.mu.RLock()
	defer cm.mu.RUnlock()
	metrics, ok := cm.caches[base]
	if !ok || len(metrics) == 0 {
		err = fmt.Errorf("no match")
		return
	}
	for name := range metrics {
		for _, match := range matches {
			if strings.Contains(name, match) {
				ret = append(ret, name)
				break // one entry per metric, even if several patterns match
			}
		}
	}
	return
}

// GetCache returns the cache for the given full metric name, lazily creating
// both the per-IP map and the per-metric cache on first use.
func (cm *CacheManager) GetCache(name string) Cache {
	ip, metric := SplitMetricName(name)
	cm.mu.Lock()
	defer cm.mu.Unlock()
	byMetric := cm.caches[ip]
	if byMetric == nil {
		// Covers both "key absent" and "key present but nil map".
		byMetric = make(map[string]Cache)
		cm.caches[ip] = byMetric
	}
	cache, ok := byMetric[metric]
	if !ok {
		cache = NewMetricCache(cm.unit)
		byMetric[metric] = cache
	}
	return cache
}

// Add stores one datapoint unless any registered filter rejects it.
func (cm *CacheManager) Add(dp DataPoint) {
	for _, rejected := range cm.Filters {
		if rejected(dp) {
			return
		}
	}
	cm.GetCache(dp.Name).Add(dp)
}

// DataChan exposes the manager's ingestion channel as send-only; points sent
// here are consumed by the goroutine started in NewCacheManager.
func (cm *CacheManager) DataChan() chan<- DataPoint {
	return cm.ch
}

// MetricCache is the Cache implementation used by CacheManager: datapoints
// are bucketed into time splits, each split holding a linked list of
// OffsetValue entries kept sorted by offset.
type MetricCache struct {
	mu             sync.RWMutex
	unit           TimeUnit                 // width of each time split
	maxSplit       TimeSplit                // newest split created so far
	caches         map[TimeSplit]*list.List // split -> offset-sorted list of OffsetValue
	lastUpdateTime int64                    // unix seconds of the most recent Add
}

// NewMetricCache returns an empty Cache that buckets datapoints by the
// given time unit.
func NewMetricCache(unit TimeUnit) Cache {
	mc := &MetricCache{
		unit:   unit,
		caches: map[TimeSplit]*list.List{},
	}
	return mc
}

// LastUpdateTime returns the unix time (seconds) of the most recent Add,
// or 0 if nothing has ever been added.
func (m *MetricCache) LastUpdateTime() int64 {
	m.mu.RLock()
	ts := m.lastUpdateTime
	m.mu.RUnlock()
	return ts
}

// QueryRange collects every stored tuple with a timestamp in [start, end],
// walking the time splits that overlap the interval. It returns a "no value"
// error when nothing falls in range.
func (m *MetricCache) QueryRange(start, end uint32) (ret []DataTuple, err error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	splits := m.unit.GetTimeSplits(start, end)
	for _, split := range splits {
		if v, ok := m.caches[split]; ok {
			// If the split contains neither endpoint it presumably lies
			// entirely inside [start, end] (assuming GetTimeSplits only
			// returns overlapping splits — TODO confirm), so the per-point
			// bounds check can be skipped for the whole list.
			shouldNotCheck := !split.Contains(start) && !split.Contains(end)
			for e := v.Front(); e != nil; e = e.Next() {
				node := e.Value.(OffsetValue)
				// Reconstruct the absolute timestamp from split base + offset.
				ts := uint32(split.base) + uint32(node.offset)
				if shouldNotCheck || (ts <= end && ts >= start) {
					ret = append(ret, DataTuple{node.value, ts})
				}
			}
		}
	}
	if len(ret) == 0 {
		err = fmt.Errorf("no value")
	}
	return
}

// shrink deletes the oldest time splits (per TimeSplits sort order) until at
// most size buckets remain. It is a no-op when the cache is already within
// the limit.
func (m *MetricCache) shrink(size int) {
	m.mu.Lock()
	defer m.mu.Unlock()
	excess := len(m.caches) - size
	if excess <= 0 {
		return
	}
	keys := make([]TimeSplit, 0, len(m.caches))
	for k := range m.caches {
		keys = append(keys, k)
	}
	sort.Sort(TimeSplits(keys))
	for _, k := range keys[:excess] {
		delete(m.caches, k)
	}
}

// LastDatapoint returns the most recently stored tuple — the tail of the
// list held under the newest split — or the zero DataTuple when no data has
// been added yet.
//
// Fix: the previous version read m.caches and m.maxSplit without holding any
// lock, racing with Add and shrink, which mutate both under m.mu.
func (m *MetricCache) LastDatapoint() (ret DataTuple) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	if v, ok := m.caches[m.maxSplit]; ok && v.Len() != 0 {
		d := v.Back().Value.(OffsetValue)
		ret = d.ToDataTuple(m.maxSplit)
	}
	return
}

// Add inserts one datapoint into the split that covers its timestamp,
// keeping each split's list sorted ascending by offset so out-of-order
// arrivals land in the right position.
func (m *MetricCache) Add(dp DataPoint) {
	split, offset := m.unit.GetTimeSplit(dp.Time)
	value := OffsetValue{offset, dp.Val}
	m.mu.Lock()
	defer m.mu.Unlock()
	v, ok := m.caches[split]
	if !ok {
		// First point for this split: create the bucket and track the
		// newest split so LastDatapoint can find it.
		if split.CompareTo(m.maxSplit) > 0 {
			m.maxSplit = split
		}
		v = list.New()
		m.caches[split] = v
		v.PushBack(value)
	} else {
		// Walk backwards to the last element whose offset is <= the new one.
		e := v.Back()
		for e != nil && e.Value.(OffsetValue).offset > offset {
			e = e.Prev()
		}
		if e != nil {
			v.InsertAfter(value, e)
		} else {
			// The new point is older than everything in this split, so it
			// belongs at the FRONT. Fix: the previous code did PushBack
			// here, appending the smallest offset to the tail and breaking
			// the sort order (and LastDatapoint, which reads Back()).
			v.PushFront(value)
		}
	}
	m.lastUpdateTime = time.Now().Unix()
}
