/*
 * Copyright (c) 2022. China Mobile(SuZhou)Software Technology Co.,Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package meta

import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/go-redis/redis/v8"
	cmap "github.com/orcaman/concurrent-map"
)

const (
	// Special names inside a PostgreSQL data directory that He3PG tracks.
	WalName     = "pg_wal"
	BaseName    = "base"
	GlobalName  = "global"
	ControlName = "pg_control"
	TblSpcName  = "pg_tblspc"
	TmpXlog     = "xlogtemp"
	// blocks per relation segment: 1G segment / 8k page size
	blocksPerSeg = 1 << 17
	// redis hash key that stores the special directory inodes
	hKey = "specialdir"
	// redis hash key that stores every database directory inode
	AllDbName          = "alldbs"
	SizeOfXLogLongPHD  = 40
	SizeOfXLogShortPHD = 24
	// prefix of the redis keys written by a compute-node boost
	BoostPrefix        = "boost"
	maxPageStoredInMem = 100000
	// when a page was last visited more than 60s ago, replace it.
	MaxOverTime = 60
)

var (
	//pg_wal directory inode for he3pg
	WalInode Ino

	//base directory inode for he3pg
	BaseInode Ino

	//global directory inode for he3pg
	GlobalInode Ino

	//pg_tblspc directory inode for he3pg
	TblSpcInode Ino

	//pg_control file inode for he3pg
	ControlInode Ino

	//key is a database directory inode, value is the database name
	AllDbInode = make(map[Ino]string)

	//shared redis client for this package; created in InitRedis
	rdbClient *redis.Client

	//newest lsn already cleaned by CleanPagesLink
	LatestLsn uint64

	//key is dbNode_relNode_forkno, value is blockno set
	PagesWithWal *PagesPref
	//PagesWithWal cmap.ConcurrentMap

	//double buffer of pages waiting to be flushed to redis;
	//key is dbNode_relNode_forno_blockno. NeedFlushIdx selects the
	//buffer that currently accepts new entries (see FlushLinksToRedis).
	//PagesToFlush *NeedFlush
	NeedFlushPages [2]map[string]Empty
	NeedFlushIdx   int32

	//in-memory cache of page -> WAL link lists;
	//key is dbNode_relNode_forno_blockno
	//PagesBuf *PagesInfo
	Pages cmap.ConcurrentMap

	//zero-size value used for set-like maps
	empty    Empty
//	TblSpc   cmap.ConcurrentMap
	TblSpcSymToRelative cmap.ConcurrentMap
	PgTblSpc cmap.ConcurrentMap
)

// Empty is a zero-size placeholder used as the value of set-like maps.
type Empty struct{}

// Prefix identifies a single relation page: database node, relation node,
// fork number and block number. Its canonical string form is
// "dbNode_relNode_forkno_blockno" (see formatPrefix).
type Prefix struct {
	dbNode  string
	relNode string
	forkno  int
	blockno int
}

// PageInfo caches the WAL records linked to one page, plus bookkeeping for
// cache replacement and redis flushing.
type PageInfo struct {
	wals      []WalLoc
	lastVisit int64 // unix seconds of the most recent access
	visits    int   // access counter, used to pick replacement victims
	flushed   int   // number of leading entries of wals already pushed to redis
}

// WalLoc locates one WAL record: the wal file inode, byte offset and length
// of the record, and the record's LSN.
// NOTE(review): LinkedInode presumably refers to a related/renamed wal file
// inode — confirm against the writers of this struct.
type WalLoc struct {
	Inode       Ino
	Offset      uint64
	Length      int
	Lsn         uint64
	LinkedInode Ino
}

// Set is a mutex-protected set of block numbers.
type Set struct {
	s  map[int]Empty
	mu sync.RWMutex
}

// set adds k to the set; safe for concurrent use.
func (s *Set) set(k int) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.s[k] = empty
}

// get reports whether k is present in the set; safe for concurrent use.
func (s *Set) get(k int) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	_, found := s.s[k]
	return found
}

// PagesPref maps a page prefix ("dbNode_relNode_forkno") to the set of
// block numbers that currently have WAL records linked to them.
type PagesPref struct {
	Pages map[string]*Set
	mu    sync.RWMutex
}

// get returns the block-number set stored under key, or nil when absent.
func (pp *PagesPref) get(key string) *Set {
	pp.mu.RLock()
	defer pp.mu.RUnlock()
	// a missing key yields the zero value, which for *Set is nil
	return pp.Pages[key]
}

// set stores blocks under key, replacing any previous entry.
func (pp *PagesPref) set(key string, blocks *Set) {
	pp.mu.Lock()
	defer pp.mu.Unlock()
	pp.Pages[key] = blocks
}

// NeedFlush collects page keys ("dbNode_relNode_forkno_blockno") whose WAL
// links still have to be flushed to redis.
// NOTE(review): the live code uses the NeedFlushPages double buffer instead;
// this type appears to belong to the commented-out PagesToFlush path —
// confirm before removing.
type NeedFlush struct {
	Prefix map[string]Empty
	mu     sync.RWMutex
}

// reset discards every pending key by installing a fresh, empty map.
func (f *NeedFlush) reset() {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.Prefix = map[string]Empty{}
}

// set records key as needing a flush.
// The previous check-then-insert was redundant: assigning an Empty value to
// an existing key is already a no-op, so we write unconditionally.
func (f *NeedFlush) set(key string) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.Prefix[key] = empty
}

//type PagesInfo struct {
//	Pages map[string]*PageInfo
//	mu sync.RWMutex
//}
//
//func (p *PagesInfo) get(key string) *PageInfo {
//	p.mu.RLock()
//	defer p.mu.RUnlock()
//	if page, ok := p.Pages[key]; ok {
//		return page
//	}
//	return nil
//}
//
//func (p *PagesInfo) set(key string, page *PageInfo) {
//	p.mu.Lock()
//	defer p.mu.Unlock()
//	p.Pages[key] = page
//}

// DealSpeicalHe3node records special he3pg directories (pg_wal, base, global,
// pg_tblspc) and database directories (children of base/ or of a registered
// tablespace) both in package globals and in redis, so lookups survive a
// restart. parentInode is the directory the new node was created in.
//
// Fixes: the pg_tblspc error message previously said "sotore"; the four
// special-name branches and the two identical db-registration branches are
// deduplicated.
func DealSpeicalHe3node(name string, inode Ino, parentInode Ino) {
	// The four special directories map onto dedicated package-level inodes.
	specials := map[string]*Ino{
		WalName:    &WalInode,
		BaseName:   &BaseInode,
		GlobalName: &GlobalInode,
		TblSpcName: &TblSpcInode,
	}
	if slot, ok := specials[name]; ok {
		if *slot != 0 {
			logger.Errorf("%s directory(%d) has been created", name, *slot)
			return
		}
		*slot = inode
		if err := rdbClient.HSet(context.Background(), hKey, name, inode.String()).Err(); err != nil {
			logger.Errorf("%s inode(%v) failed to store in redis: %v", name, inode, err)
		}
		return
	}
	// A directory directly under base/, or under a known tablespace, is a
	// database directory: remember its inode in AllDbInode and redis.
	isDb := parentInode == BaseInode
	if !isDb {
		_, isDb = PgTblSpc.Get(strconv.FormatUint(uint64(parentInode), 10))
	}
	if isDb {
		AllDbInode[inode] = name
		if err := rdbClient.HSet(context.Background(), AllDbName, name, inode.String()).Err(); err != nil {
			logger.Errorf("db(%v) inode(%v) failed to store in redis: %v", name, inode, err)
		}
	}
}

// GetEgde looks up edge e via the backing engine and converts the result to
// a syscall errno (0 on success).
func (m *dbMeta) GetEgde(ctx Context, e *Edge) syscall.Errno {
	if err := m.en.doGetEdge(ctx, e); err != 0 {
		return errno(err)
	}
	return 0
}
// formatPrefix renders pref as the canonical page key
// "dbNode_relNode_forkno_blockno".
func formatPrefix(pref *Prefix) string {
	return pref.dbNode + "_" + pref.relNode + "_" +
		strconv.Itoa(pref.forkno) + "_" + strconv.Itoa(pref.blockno)
}

// FileIsPageData decides whether the file (ino, parent, fname) is a relation
// page file and, if so, returns the WAL records linked to the page that
// covers offset.
//
// Returns (wals, isPage, errno):
//   - isPage is true when fname looks like relation data (name starts with a
//     digit) inside a known database directory or global/;
//   - wals is the list of WAL locations linked to the page, nil when none.
//
// pref is filled with the page identity (dbNode, relNode, forkno, blockno)
// as a side effect.
//
// Fixes: guards against an index panic on empty fname, and against a nil
// *Set dereference when the page prefix is removed concurrently.
func (m *dbMeta) FileIsPageData(ino, parent Ino, fname string, offset uint64, pref *Prefix) ([]WalLoc, bool, syscall.Errno) {
	dbname, ok := AllDbInode[parent]
	if parent != GlobalInode && !ok {
		// Not inside a database directory nor global/: not page data.
		return nil, false, 0
	}
	// Relation data files are named by their relfilenode number, so the name
	// always starts with a digit. The len guard avoids fname[0] panicking.
	if len(fname) == 0 || fname[0] < '0' || fname[0] > '9' {
		return nil, false, 0
	}
	if parent == GlobalInode {
		// Shared relations live in global/ and use database id 0.
		pref.dbNode = "0"
	} else {
		pref.dbNode = dbname
	}
	// Name forms: "relnode", "relnode.segno", "relnode_fsm", "relnode_vm".
	ns := strings.Split(fname, "_")
	if len(ns) > 1 {
		switch ns[1] {
		case "vm":
			pref.forkno = 2
		case "fsm":
			pref.forkno = 1
		}
	}

	segs := strings.Split(ns[0], ".")
	var segno int
	if len(segs) > 1 {
		var err error
		segno, err = strconv.Atoi(segs[1])
		if err != nil {
			return nil, false, errno(err)
		}
	}
	// 8k pages: offset>>13 is the block within this 1G segment.
	pref.blockno = int(offset>>13) + segno*blocksPerSeg
	pref.relNode = segs[0]

	if pref.relNode == "" {
		return nil, false, errno(fmt.Errorf("cannot find inode(%v).", ino))
	}

	pagePref := fmt.Sprintf("%v_%v_%v", pref.dbNode, pref.relNode, pref.forkno)
	pages := PagesWithWal.get(pagePref)
	if pages == nil || !pages.get(pref.blockno) {
		// No WAL is linked to this page.
		return nil, true, 0
	}

	key := formatPrefix(pref)

	// Fast path: the link list is cached in memory.
	if p, ok := Pages.Get(key); ok {
		page := p.(*PageInfo)
		page.visits++
		page.lastVisit = time.Now().Unix()
		return page.wals, true, 0
	}

	// Slow path: load the link list from redis.
	vals, err := rdbClient.LRange(context.Background(), key, 0, -1).Result()
	if err == redis.Nil {
		// Redis no longer has a list for this page: drop the stale
		// in-memory membership info.
		PagesWithWal.mu.Lock()
		if set := PagesWithWal.Pages[pagePref]; set != nil { // may be removed concurrently
			delete(set.s, pref.blockno)
			if len(set.s) == 0 {
				delete(PagesWithWal.Pages, pagePref)
			}
		}
		PagesWithWal.mu.Unlock()
		return nil, true, 0
	}
	if err != nil {
		return nil, false, errno(err)
	}
	var wals []WalLoc
	for _, v := range vals {
		var wal WalLoc
		if err := json.Unmarshal([]byte(v), &wal); err != nil {
			return nil, false, errno(fmt.Errorf("existing format is broken: %s", err))
		}
		wals = append(wals, wal)
	}

	return wals, true, 0
}

// CleanPagesLink removes page->WAL link lists that are no longer needed once
// a compute node has boosted (replayed) up to lastLsn.
//
// It finds the redis boost key "boost_<lastLsn>_<startLsn>" recording which
// pages that boost touched, trims each page's WAL list to the records newer
// than lastLsn (deleting lists that only hold older records), recursively
// cleans newer boosts, then drops the boost key and advances LatestLsn.
//
// Fixes: the trim/delete logic previously used the index of the *page* in
// the outer loop where the index of the *wal entry* was needed, so LTrim cut
// at the wrong position and the whole-list delete condition compared against
// the wrong length; the conversion-failure log printed the zero-valued int
// instead of the offending string.
func CleanPagesLink(lastLsn uint64) {
	ctx := context.Background()
	//firstly, find the key which compute node boost basic page to consistent lsn
	keys := rdbClient.Keys(ctx, fmt.Sprintf("%v_%v_*", BoostPrefix, lastLsn)).Val()
	logger.Infof("keys is %v", keys)
	if len(keys) == 0 {
		return
	}
	if len(keys) != 1 {
		logger.Errorf("find %v_ prefix in redis more than 1.", lastLsn)
	}
	key := keys[0]
	//secondly, find all pages in this boost
	pages := rdbClient.SMembers(ctx, key).Val()
	if len(pages) == 1 && pages[0] == "null" {
		logger.Infof("key %v does not contain page data", key)
	} else {
		for _, pageid := range pages {
			// all WAL records currently linked to this page, oldest first
			vals := rdbClient.LRange(ctx, pageid, 0, -1).Val()
			trimmed := false
			for i, v := range vals {
				var wal WalLoc
				if err := json.Unmarshal([]byte(v), &wal); err != nil {
					logger.Errorf("unmarshal wal failed: %v", err)
					return
				}
				// Keep the list from the first record newer than lastLsn.
				if wal.Lsn > lastLsn {
					if i >= 1 {
						rdbClient.LTrim(ctx, pageid, int64(i), -1)
					}
					trimmed = true
					break
				}
			}
			// Every record is <= lastLsn: this page needs no wals any more.
			if !trimmed && len(vals) > 0 {
				rdbClient.Del(ctx, pageid)
			}
		}
	}

	lsns := strings.Split(key, "_")
	if len(lsns) > 2 {
		startLsn, err := strconv.Atoi(lsns[2])
		if err != nil {
			logger.Errorf("fail to convert %v to int: %v.", lsns[2], err)
			return
		}
		//when we start to clean pages link, the compute node may have boosted
		//more times; clean the keys of every newer boosted startLsn too.
		if LatestLsn < uint64(startLsn) {
			CleanPagesLink(uint64(startLsn))
		}
	}

	//finally, clean the relation of lsn and pages.
	rdbClient.Del(ctx, key)
	LatestLsn = lastLsn
}

// GetPrentInodeList returns the parent inode list from the backing engine.
func (m *dbMeta) GetPrentInodeList(ctx Context) []Ino {
	return m.en.doGetPrentInodeList(ctx)
}
// GetAllEdges returns every edge known to the backing engine.
func (m *dbMeta) GetAllEdges(ctx Context) []edge {
	return m.en.doGetAllEdges(ctx)
}
// GetDecendants returns all edges that descend from parent.
// (Formatting fixed to be gofmt-clean: space after the comma in the
// parameter list and call.)
func (m *dbMeta) GetDecendants(ctx Context, parent Ino) []edge {
	return m.en.doGetDecendants(ctx, parent)
}

//Only a partial set of page->WAL link relations is kept in memory.
//When putting a new relation into memory, we need to abandon one already in memory.
//func needTransUnusedPage(pageid string, newpage *PageInfo) {
//	ctx := context.Background()
//	var cnt int
//	var minVisits int
//	var minVKey string
//	for key, page := range Pages {
//		old := time.Now().Add(-MaxOverTime * time.Second)
//		//if one last visite time is more than MaxOverTime second, directly delete it or save it to redis.
//		if page.lastVisit.Before(old) {
//			if _, err := os.Stat("standby.signal"); err == nil {
//				v, lerr := rdbClient.LIndex(ctx, key, -1).Bytes()
//				if lerr == redis.Nil {
//					//flush it to redis
//					if page.flushed < len(page.wals) {
//						if err = rdbClient.RPush(ctx, key, page.wals[page.flushed:]).Err(); err == nil {
//							delete(Pages, key)
//							Pages[pageid] = newpage
//							break
//						}
//					}
//				}
//				var wal WalLoc
//				err = json.Unmarshal((v), &wal)
//				if err != nil {
//					logger.Errorf("unmarshal wal failed: %v", err)
//					return
//				}
//				length := len(page.wals)
//				if page.wals[length - 1].Lsn > wal.Lsn {
//					for i := length -1; i > 0; i-- {
//						if page.wals[i-1].Lsn < wal.Lsn {
//							//flush it to redis
//							if err = rdbClient.RPush(ctx, key, page.wals[i:]).Err(); err == nil {
//								delete(Pages, key)
//								Pages[pageid] = newpage
//								break
//							}
//						}
//					}
//				}
//				delete(Pages, key)
//				Pages[pageid] = newpage
//				break
//			}else if os.IsNotExist(err) {
//				if page.flushed < len(page.wals) {
//					//flush it to redis
//					if err = rdbClient.RPush(ctx, key, page.wals[page.flushed:]).Err(); err == nil {
//						delete(Pages, key)
//						Pages[pageid] = newpage
//						break
//					}
//				}
//			}
//
//		}
//		if cnt == 0 || page.visits < minVisits {
//			minVisits = page.visits
//			minVKey = key
//		}
//		cnt++
//	}
//	minVPage := Pages[minVKey]
//	if minVPage.flushed < len(minVPage.wals) {
//		//flush it to redis
//		if err := rdbClient.RPush(ctx, minVKey, minVPage.wals[minVPage.flushed:]).Err(); err == nil {
//			delete(Pages, minVKey)
//			Pages[pageid] = newpage
//		}
//	}
//}

// CleanLocalLinks drops from the in-memory caches every page whose newest
// linked WAL record is older than lastlsn: those pages no longer need replay.
//
// Fixes: guards against an index panic when a cached page has an empty wals
// slice, and against a nil *Set dereference when the page prefix was removed
// concurrently from PagesWithWal.
func CleanLocalLinks(lastlsn uint64) {
	var stale []string
	for pageid, p := range Pages.Items() {
		page := p.(*PageInfo)
		n := len(page.wals)
		// n > 0 guard: an empty link list must not panic here.
		if n > 0 && page.wals[n-1].Lsn < lastlsn {
			stale = append(stale, pageid)
		}
	}

	for _, pageid := range stale {
		Pages.Remove(pageid)
		// pageid is "dbNode_relNode_forkno_blockno": split off the block no.
		last := strings.LastIndex(pageid, "_")
		blk, err := strconv.Atoi(pageid[last+1:])
		if err != nil {
			logger.Errorf("string trans to int failed: %v", err)
			return
		}
		pagePref := pageid[:last]
		PagesWithWal.mu.Lock()
		if set := PagesWithWal.Pages[pagePref]; set != nil { // may be removed concurrently
			delete(set.s, blk)
			if len(set.s) == 0 {
				delete(PagesWithWal.Pages, pagePref)
			}
		}
		PagesWithWal.mu.Unlock()
	}
}

// TrimLocalLink drops the first index WAL entries of the cached link list
// for pref, unless entry index-1 is already newer than pageLsn (meaning the
// list was trimmed by someone else), and adjusts the flushed counter.
//
// Fixes: the previous code evaluated page.wals[index-1] without checking
// index >= 1, panicking for index == 0; index == 0 is now a no-op.
func TrimLocalLink(pref *Prefix, index int, pageLsn uint64) {
	key := formatPrefix(pref)
	p, ok := Pages.Get(key)
	if !ok {
		return
	}
	page := p.(*PageInfo)
	// index <= 0 would trim nothing (and index-1 would underflow);
	// index >= len means the caller's view is stale — leave the list alone.
	if index <= 0 || index >= len(page.wals) {
		return
	}
	if page.wals[index-1].Lsn > pageLsn {
		logger.Infof("page(%v) link has trimed: index %v, pagelsn %v", key, index, pageLsn)
		return
	}
	page.wals = page.wals[index:]
	if page.flushed > index {
		page.flushed -= index
	} else {
		page.flushed = 0
	}
}

// FlushLinksToRedis pushes the in-memory page->WAL link lists of the active
// NeedFlushPages buffer onto their redis lists, then clears that buffer.
// It flips the double buffer first so that new entries accumulate in the
// other map while this one drains. Intended to be called periodically.
func FlushLinksToRedis() {
	ctx := context.Background()
	//logger.Infoln("start to flush links to redis.")
	//tmp := NeedFlushPages.Items()
	//PagesToFlush.mu.RLock()
	//tmp := make(map[string]Empty)
	//for pageid := range PagesToFlush.Prefix {
	//	logger.Infoln("pageid is ", pageid)
	//	tmp[pageid] = empty
	//}
	//PagesToFlush.mu.RUnlock()
	// tmp = buffer to drain, other = buffer that will accept new entries.
	// NOTE(review): NeedFlushIdx is read non-atomically here but written with
	// atomic.StoreInt32 below — confirm all callers run on one goroutine.
	tmp := NeedFlushIdx
	other := NeedFlushIdx ^ 1
	olen := len(NeedFlushPages[other])
	if olen > 0 {
		// The previous flush has not emptied its buffer yet; skip this round.
		logger.Infof("links %v is too many, it is still flushing links to redis, so we try in next time.", olen)
		return
	}
	atomic.StoreInt32(&NeedFlushIdx, other)
	pages := NeedFlushPages[tmp]
	//logger.Infof("need to flush pages(idx: %v, tmp: %v): %v ", NeedFlushIdx,tmp, NeedFlushPages)
	if len(pages) > 0 {
		//if _, err := os.Stat("standby.signal"); os.IsNotExist(err) {
		for pageid, _ := range pages {

			if p, ok := Pages.Get(pageid); ok {
				page := p.(*PageInfo)
				//logger.Infoln("page's wal ", page.wals)
				// flushed = how many leading wals entries are already in redis
				flushed := page.flushed
				//logger.Infof("page flushed %v, wals %v", page.flushed, page.wals)
				if flushed < len(page.wals) {
					var bs []string
					// Peek at the last record already stored for this page.
					v, lerr := rdbClient.LIndex(ctx, pageid, -1).Bytes()
					if lerr == redis.Nil {
						// No redis list yet: push every unflushed entry.
						for i := flushed; i < len(page.wals); i++ {
							b, err := json.Marshal(&page.wals[i])
							if err != nil {
								logger.Errorf("marshal wal(%v) failed: %v", page.wals[i], err)
								return
							}
							bs = append(bs, string(b))
						}
						if len(bs) > 0 {
							err := rdbClient.RPush(ctx, pageid, bs).Err()
							if err != nil {
								logger.Errorf("push links to redis failed: %v", err)
							}
							page.flushed = len(page.wals)
							//Pages.Set(pageid, page)
						}
						continue
					}
					if lerr != nil {
						logger.Errorf("fail to query page(%v), err is %v", pageid, lerr)
						return
					}

					// Redis already holds records: only push entries whose
					// Lsn is newer than the last one stored, to avoid dupes.
					var wal WalLoc
					err := json.Unmarshal((v), &wal)
					if err != nil {
						logger.Errorf("unmarshal wal(%v) failed: %v", string(v), err)
						return
					}
					for i := flushed; i < len(page.wals); i++ {
						if page.wals[i].Lsn > wal.Lsn {
							b, err := json.Marshal(&page.wals[i])
							if err != nil {
								logger.Errorf("marshal failed: %v", err)
								return
							}
							bs = append(bs, string(b))
						}
					}
					if len(bs) > 0 {
						err = rdbClient.RPush(ctx, pageid, bs).Err()
						if err != nil {
							logger.Errorf("push links to redis failed: %v", err)
						}
						page.flushed = len(page.wals)
						//Pages.Set(pageid, page)
					}
				}
			}

		}
		//}
		// Drained: give the buffer a fresh map for the next flip.
		NeedFlushPages[tmp] = make(map[string]Empty)
	}

}

// initSpecialDir restores the special directory/file inodes and the database
// inode map from redis at startup.
func initSpecialDir() error {
	ctx := context.Background()
	for name, val := range rdbClient.HGetAll(ctx, hKey).Val() {
		n, err := strconv.Atoi(val)
		if err != nil {
			return err
		}
		switch name {
		case WalName:
			WalInode = Ino(n)
		case BaseName:
			BaseInode = Ino(n)
		case GlobalName:
			GlobalInode = Ino(n)
		case ControlName:
			ControlInode = Ino(n)
		}
	}
	for dbname, val := range rdbClient.HGetAll(ctx, AllDbName).Val() {
		n, err := strconv.Atoi(val)
		if err != nil {
			return err
		}
		AllDbInode[Ino(n)] = dbname
	}
	logger.Infoln("all db map are ", AllDbInode)

	return nil
}

// InitRedis initialises this package's in-memory caches (Pages,
// NeedFlushPages, PagesWithWal), connects the shared redis client at uri
// (falling back to localhost:6379 when the URI cannot be parsed), and
// restores the special-directory state via initSpecialDir. It must be called
// before any other function in this file that touches rdbClient.
func InitRedis(uri string) {
	//PagesBuf = &PagesInfo{
	//	Pages: make(map[string]*PageInfo),
	//}
	Pages = cmap.New()
	NeedFlushPages[0] = make(map[string]Empty)
	NeedFlushPages[1] = make(map[string]Empty)
	//PagesToFlush = &NeedFlush{
	//	Prefix: make(map[string]Empty),
	//}
	PagesWithWal = &PagesPref{
		Pages: make(map[string]*Set),
	}
	opt, err := redis.ParseURL(uri)
	if err != nil {
		logger.Warningf("fail to get redis url: %v", err)
	}
	// Parse failure leaves opt nil: fall back to a local default instance.
	if opt == nil {
		opt = &redis.Options{
			Addr: "localhost:6379",
			DB:   0,
		}
	}
	logger.Println("opt ", opt.Addr, opt.DB, opt.Password, opt.Username)
	rdbClient = redis.NewClient(opt)

	err = initSpecialDir()
	if err != nil {
		logger.Fatalf("initial special dir's inode faield: %v", err)
	}
//	rebuildLinks()
//}
//
//func rebuildLinks() {
//	keys, err := rdbClient.Keys(context.Background(), "*_*_*_*").Result()
//	if err != nil && err != redis.Nil {
//		logger.Fatalf("cannot list all page links: %v", err)
//	}
//	for _, key := range keys {
//		lidx := strings.LastIndex(key, "_")
//		pref := key[:lidx]
//		blk, err := strconv.Atoi(key[lidx+1:])
//		if err != nil {
//			logger.Fatalf("PagesWithWal struct contain error: %v", err)
//		}
//		if pages, ok := PagesWithWal.Pages[pref]; ok {
//			if _, exist := pages.s[blk]; !exist{
//				pages.s[blk] = empty
//			}
//		} else {
//			set := &Set{s: map[int]Empty{int(blk): empty}}
//			PagesWithWal.Pages[pref] = set
//		}
//	}
}
