package tfnode

import (
	"errors"
	"nimblestore"
	"strconv"
	"time"
	"util/zkwrapper"
)

// detectFollowersAllOnboarding fetches the addresses of all followers in this
// server's replication group from ZooKeeper. It returns false as soon as any
// follower node is missing; otherwise it refreshes the cached follower
// addresses and returns true.
func (s *Server) detectFollowersAllOnboarding() bool {
	rgPath := "/jfs-root/tfnode-rg/" + strconv.Itoa(s.rg)
	for idx := 0; idx < FullReplFactor; idx++ {
		//follower ids are consecutive after this server's own id
		followerID := s.id + idx + 1
		addr, err := s.zkw.GetServerValue(rgPath, followerID)
		if err != nil {
			s.errLog.Printf("Error: follower %v is missing\n", followerID)
			return false
		}
		if addr != s.followerAddr[idx] {
			s.followerAddr[idx] = addr
		}
	}
	return true
}

// checkDiskStatus reports whether the local disk is healthy. It returns false
// — after updating the server state and the Zk-visible availability level —
// when the disk has run out of free space or has accumulated too many errors.
func (s *Server) checkDiskStatus() bool {
	switch {
	case s.store.StatFreeSpace() == 0:
		s.fatalLog.Println("Fatal: the disk is to be full")
		s.setState(DiskAlreadyFull)
		s.setAvailabilityLevel(DiskAlreadyFull)
		return false
	case s.stats.DiskErrors() > MaxDiskErrors:
		s.fatalLog.Println("Fatal: too many disk errors so switch to unavailable")
		s.setState(DiskCorrupted)
		s.setAvailabilityLevel(DiskCorrupted)
		return false
	default:
		return true
	}
}

// registerWithZk establishes a fresh ZooKeeper session and registers this
// server under both its replication-group directory and the client-visible
// directory. On any failure the handle is released (s.zkw set to nil) and the
// error is returned; on success s.zkw is usable and nil is returned.
func (s *Server) registerWithZk() error {
	var err error
	if s.zkw, err = zkwrapper.NewZkWrapper(s.zk); err != nil {
		s.zkw = nil
		s.fatalLog.Println("Fatal: failed to connect ZK ", err)
		return err
	}

	//wait for zk session expiry
	<-time.After(NormalWakeUpPeriod * time.Second)

	//tearDown releases the handle after a registration failure
	tearDown := func(e error) error {
		s.zkw.Close()
		s.zkw = nil
		s.fatalLog.Println("Fatal: failed to register with ZK ", e)
		return e
	}

	//add itself to its own repl dir
	rgPath := "/jfs-root/tfnode-rg/" + strconv.Itoa(s.rg)
	if err = s.zkw.AddEphemeralServer(rgPath, s.id, s.ip+":"+s.port); err != nil {
		return tearDown(err)
	}

	//add to client-visible dir, weighted by free space and current state
	weight := WeightFactor(s.store.StatFreeSpace(), s.getState())
	value := s.ip + ":" + s.port + "," + strconv.FormatInt(weight, 10) + "|" + s.dc
	if err = s.zkw.AddEphemeralServer("/jfs-root/tfnode", s.id, value); err != nil {
		return tearDown(err)
	}

	s.infoLog.Println("Info: register with Zk")
	return nil
}

// checkZkConnHealth verifies that both of this server's ephemeral nodes still
// exist on ZooKeeper. When the handle is gone, or either node has vanished
// (e.g. the session expired), it re-registers and reports whether that
// re-registration succeeded.
func (s *Server) checkZkConnHealth() bool {
	if s.zkw == nil {
		return s.registerWithZk() == nil
	}

	//short-circuit: the second node is only probed when the first exists
	if !s.zkw.CheckExistingNode("/jfs-root/tfnode/"+strconv.Itoa(s.id)) ||
		!s.zkw.CheckExistingNode("/jfs-root/tfnode-rg/"+strconv.Itoa(s.rg)+"/"+strconv.Itoa(s.id)) {
		s.infoLog.Println("Info: Zk connection was broken so re-connect")
		s.zkw.Close()
		return s.registerWithZk() == nil
	}

	return true
}

// chunkFollowerSyncFrom pulls data for the given chunk from follower f until
// the local watermark catches up with the follower's. If the initial
// watermark query fails, the connection is assumed stale: it reconnects once
// and retries. Data is copied in slices of at most DataSyncBufSize bytes.
// Returns nil once the local copy is at or beyond the follower's watermark.
//
// Note: the original ended with `return e`, which only worked because the
// inner `data, e :=` shadowed the outer `e` (left nil); we now return nil
// explicitly to remove that fragile dependency.
func (s *Server) chunkFollowerSyncFrom(chunk int, f *Follower) error {
	offsetPeer, err := f.GetWatermark(chunk)
	if err != nil {
		//here we do re-connect and retry the query exactly once
		if err = f.Reconnect(); err != nil {
			return err
		}
		if offsetPeer, err = f.GetWatermark(chunk); err != nil {
			return err
		}
	}

	offset, err := s.store.GetWatermark(chunk)
	if err != nil {
		return err
	}

	for offsetPeer > offset {
		//cap each transfer at DataSyncBufSize
		bufSize := offsetPeer - offset
		if bufSize > DataSyncBufSize {
			bufSize = DataSyncBufSize
		}

		data, err := f.SyncFrom(chunk, offset, bufSize)
		if err != nil {
			return err
		}

		if err = s.store.FullFileWrite(data, chunk, offset); err != nil {
			return err
		}

		offset += bufSize
		s.infoLog.Printf("Info: sync from follower, [%v|%v|%v], %v\n", chunk, offset, bufSize, f.addr)
	}

	return nil
}

// chunkFollowerSyncTo pushes data for the given chunk to follower f until the
// follower's watermark catches up with ours. If the initial watermark query
// fails, the connection is assumed stale: it reconnects once and retries.
// Data is copied in slices of at most DataSyncBufSize bytes. Returns nil once
// the follower is at or beyond the local watermark.
//
// Note: the original ended with `return e`, which only worked because the
// inner `data, e :=` shadowed the outer `e` (left nil); we now return nil
// explicitly to remove that fragile dependency.
func (s *Server) chunkFollowerSyncTo(chunk int, f *Follower) error {
	offsetPeer, err := f.GetWatermark(chunk)
	if err != nil {
		//here we do re-connect and retry the query exactly once
		if err = f.Reconnect(); err != nil {
			return err
		}
		if offsetPeer, err = f.GetWatermark(chunk); err != nil {
			return err
		}
	}

	offset, err := s.store.GetWatermark(chunk)
	if err != nil {
		return err
	}

	for offsetPeer < offset {
		//cap each transfer at DataSyncBufSize
		bufSize := offset - offsetPeer
		if bufSize > DataSyncBufSize {
			bufSize = DataSyncBufSize
		}

		data, err := s.store.FullFileRead(chunk, offsetPeer, bufSize)
		if err != nil {
			return err
		}
		if err = f.SyncTo(chunk, offsetPeer, bufSize, data); err != nil {
			return err
		}

		offsetPeer += bufSize
		s.infoLog.Printf("Info: sync to follower, [%v|%v|%v], %v\n", chunk, offsetPeer, bufSize, f.addr)
	}

	return nil
}

// ensureFollowerConn (re)establishes the connection to follower f when the
// cached address at slot j has changed or the link is down. It is a no-op
// when the connection is already healthy.
func (s *Server) ensureFollowerConn(j int, f *Follower) error {
	if f.addr == s.followerAddr[j] && !f.Disconnected() {
		return nil
	}
	return f.Setup(s.followerAddr[j])
}

// prepareReplRelation reconciles every chunk file with both followers: it
// first pulls any newer data from each follower, then pushes local data the
// followers are missing. Client writes are blocked for the duration
// (ReplGroupRecovery). On success the state becomes ReplGroupReady; if any
// chunk fails to reach all followers, the state becomes ReplGroupSplit.
//
// Fix over the original: the sync-to failure log said "syncfrom failed"
// (copy-paste error); it now says "syncto failed".
func (s *Server) prepareReplRelation() {
	s.infoLog.Println("start to sync up data with followers")
	//prevent writes from clients while watermarks are reconciled
	s.setState(ReplGroupRecovery)

	//all chunk records must be idle (back in the channel) before we start
	if !s.chunkChanFull() {
		s.errLog.Println("Error: chunk chan is not full: ", len(s.chunkChan))
		return
	}

	var err error
	for i := 0; i < nimblestore.ChunkFileCount; i++ {
		var readyFollowers int
		cr := <-s.chunkChan

		//firstly sync from both followers
		for j, f := range cr.followers {
			//check connection
			if e := s.ensureFollowerConn(j, f); e != nil {
				s.errLog.Println("Error: failed to set up follower ", f.addr, e)
				continue
			}
			if e := s.chunkFollowerSyncFrom(cr.cid, f); e != nil {
				s.errLog.Printf("Error: chunk %v syncfrom failed, %v\n", cr.cid, e)
				continue
			}
			readyFollowers++
		}
		//then sync to them
		for j, f := range cr.followers {
			//check connection
			if e := s.ensureFollowerConn(j, f); e != nil {
				s.errLog.Println("Error: failed to set up follower ", f.addr, e)
				continue
			}
			if e := s.chunkFollowerSyncTo(cr.cid, f); e != nil {
				s.errLog.Printf("Error: chunk %v syncto failed, %v\n", cr.cid, e)
				continue
			}
			readyFollowers++
		}

		//release to the channel before judging readiness
		s.chunkChan <- cr

		//each chunk needs both directions on every follower to count as ready
		if readyFollowers < FullReplFactor*2 {
			s.errLog.Println("Error: failed to prepare repl group")
			err = errors.New("UnreadyReplGroup")
			break
		}

		s.infoLog.Printf("Info: succeed to sync up chunk file %v\n", i+1)
	}

	state := int32(ReplGroupSplit)
	if err == nil {
		state = ReplGroupReady
		s.infoLog.Println("Info: succeed to prepare repl group")
	}
	s.setState(state)
	s.setAvailabilityLevel(state)
}

// setAvailabilityLevel publishes this server's availability to the
// client-visible Zk directory as a weight derived from free disk space times
// the given state. When Zk is unreachable the handle is closed and cleared so
// callers know to re-register; a ZkUnavailable error is returned when there
// is no handle at all.
func (s *Server) setAvailabilityLevel(state int32) error {
	if s.zkw == nil {
		s.errLog.Println("Error: zk is not available")
		return errors.New("ZkUnavailable")
	}

	weight := s.store.StatFreeSpace() * int64(state)
	value := s.ip + ":" + s.port + "," + strconv.FormatInt(weight, 10) + "|" + s.dc

	if err := s.zkw.SetServerValue("/jfs-root/tfnode", s.id, value); err != nil {
		//close the handle in case of errors
		s.zkw.Close()
		s.zkw = nil
		s.fatalLog.Println("Fatal: failed to update Zk")
		return err
	}

	s.infoLog.Printf("Info: update to Zk with weight %v\n", weight)
	return nil
}

// serveReplGroupChange is the long-running maintenance loop: it reports
// stats, validates disk and ZooKeeper health, reconciles the replication
// group when membership changes, and then blocks on a Zk children watch (or
// a periodic timeout) before re-evaluating. It never returns.
//
// Fix over the original: `x == false` comparisons replaced with `!x`
// (staticcheck S1002), and the redundant trailing `continue`s in the final
// select were removed.
func (s *Server) serveReplGroupChange() {
	for {
		//report stats to the monitor
		s.ReportStats()

		//check disk corruption and zk exception
		if !s.checkDiskStatus() || !s.checkZkConnHealth() {
			<-time.After(AbnormalWakeUpPeriod * time.Minute)
			continue
		}

		noFollowerAbsent := s.detectFollowersAllOnboarding()
		switch {
		case noFollowerAbsent && s.getState() != ReplGroupReady:
			//make the replica group ready
			s.prepareReplRelation()
		case !noFollowerAbsent && s.getState() == ReplGroupReady:
			//a follower dropped out while we were ready
			s.setState(ReplGroupSplit)
			s.setAvailabilityLevel(ReplGroupSplit)
		default:
			//keep the current state but refresh the published weight
			s.setAvailabilityLevel(s.getState())
		}

		//in case of zk errors, zkw was cleared (see setAvailabilityLevel)
		if s.zkw == nil {
			s.fatalLog.Println("Fatal: zkw was cleared so continue")
			<-time.After(AbnormalWakeUpPeriod * time.Minute)
			continue
		}

		//some follower is missing, so watch the repl-group dir on Zk
		ch, e := s.zkw.WatchChildren("/jfs-root/tfnode-rg/" + strconv.Itoa(s.rg))
		if e != nil {
			//close the handle in case of errors
			s.zkw.Close()
			s.zkw = nil
			s.fatalLog.Println("Fatal: failed to watch Zk")
			<-time.After(AbnormalWakeUpPeriod * time.Minute)
			continue
		}

		//sleep until group membership changes or the periodic timeout fires
		select {
		case <-ch:
			s.infoLog.Println("Info: notified by Zk")
		case <-time.After(NormalWakeUpPeriod * time.Minute):
			s.infoLog.Println("Info: timeout to check group changes")
		}
	}
}
