// Package tfnode implements a tiny-file storage server node that runs
// stand-alone or as the leader/follower of a replica group.
package tfnode

import (
	"errors"
	"hash/crc32"
	"io"
	"net"
	"os"
	"strconv"
	"sync/atomic"
	"time"

	"nimblestore"
	"util/config"
	"util/log"
	"util/zkwrapper"
)

// Server is a single tiny-file storage node. It runs either stand-alone
// (SingleMode) or as the leader/follower of a replica group (ReplMode).
type Server struct {
	ip      string // listen address, from config key "ip"
	port    string // listen port, from config key "port"
	zk      string // zookeeper connection string (ReplMode only)
	datadir string // directory holding chunk data files
	metadir string // directory holding metadata and log files
	monitor string // monitor endpoint, from config key "mon"

	weight    string //availability weight
	dc        string //datacenter
	rg        int    //replica group
	id        int    //own id
	mode      int    // SingleMode or ReplMode
	isLeader  bool   // true when this node leads its replica group
	syncWrite bool   // sync the chunk after each primary write (config "sync" == "true")

	zkw          *zkwrapper.ZkWrapper
	followerAddr [FullReplFactor]string // addresses of the group's followers

	store *nimblestore.Store // the storage engine
	stats *Stats             // runtime counters (connections, I/O errors, ...)

	// chunkChan hands out exclusive ownership of chunk ids to writers:
	// a writer takes a *ChunkRepl, writes, then puts it back.
	chunkChan chan *ChunkRepl
	state     int32 // server state; access only via setState/getState (atomic)

	// per-category loggers, opened by initLog under metadir
	readLog  *log.Logger
	writeLog *log.Logger
	errLog   *log.Logger
	workLog  *log.Logger
	fatalLog *log.Logger
	infoLog  *log.Logger
}

//server utility functions - initLog, checkConfig, setState, initChunkChan
// initLog opens one dated, append-mode log file per category under
// metadir and attaches a logger to each. It fails fast on the first
// file that cannot be opened, returning an error naming the category.
func (s *Server) initLog() error {
	const LogFileOpt = os.O_RDWR | os.O_CREATE | os.O_APPEND
	logOpt := log.LstdFlags | log.Lmicroseconds
	// stamp every file with the same date, even if init straddles midnight
	date := time.Now().Format("2006-01-02")

	// open creates/opens one dated log file and wraps it in a logger;
	// failMsg names the log category in the returned error.
	open := func(name, failMsg string) (*log.Logger, error) {
		fp, err := os.OpenFile(s.metadir+"/"+name+"."+date, LogFileOpt, 0666)
		if err != nil {
			return nil, errors.New(failMsg)
		}
		return log.New(fp, "", logOpt), nil
	}

	var err error
	if s.readLog, err = open(ReadLogFileName, "ReadLogFileOpenFailed"); err != nil {
		return err
	}
	if s.writeLog, err = open(WriteLogFileName, "WriteLogFileOpenFailed"); err != nil {
		return err
	}
	if s.errLog, err = open(ErrLogFileName, "ErrLogFileOpenFailed"); err != nil {
		return err
	}
	// fix: the original misspelled this error as "FaltalLogFileOpenFailed"
	if s.fatalLog, err = open(FatalLogFileName, "FatalLogFileOpenFailed"); err != nil {
		return err
	}
	if s.workLog, err = open(WorkLogFileName, "WorkLogFileOpenFailed"); err != nil {
		return err
	}
	if s.infoLog, err = open(InfoLogFileName, "InfoLogFileOpenFailed"); err != nil {
		return err
	}

	return nil
}

// checkConfig validates cfg and absorbs it into the server's fields.
// The basic keys (ip/port/datadir/metadir) are always required; when
// mode != "single" the replication keys (zk/dc/id) are required too,
// and the server id determines the replica group and leadership.
func (s *Server) checkConfig(cfg *config.Config) error {
	s.ip = cfg.GetString("ip")
	s.port = cfg.GetString("port")
	s.datadir = cfg.GetString("datadir")
	s.metadir = cfg.GetString("metadir")

	if s.ip == "" || s.port == "" || s.datadir == "" || s.metadir == "" {
		return errors.New("Bad Config File")
	}

	// single mode needs no replication configuration
	if cfg.GetString("mode") == "single" {
		s.mode = SingleMode
		return nil
	}

	s.mode = ReplMode
	s.zk = cfg.GetString("zk")
	s.dc = cfg.GetString("dc")
	s.monitor = cfg.GetString("mon")
	idStr := cfg.GetString("id")

	if s.zk == "" || s.dc == "" || idStr == "" {
		return errors.New("Bad Config File")
	}

	// fix: the original discarded the Atoi error; a non-numeric id now
	// fails explicitly instead of relying on the zero-value range check
	var err error
	s.id, err = strconv.Atoi(idStr)
	if err != nil || s.id <= 0 || s.id > MaxServerId {
		return errors.New("BadServerId")
	}

	// the id encodes both the replica group and the leader role
	s.rg = GrpId(s.id)
	s.isLeader = IsLeader(s.id)

	s.syncWrite = cfg.GetString("sync") == "true"

	return nil
}

//atomic load/set
// setState atomically publishes a new server state (e.g. ReplGroupSplit,
// ShuttingDown); safe to call from any goroutine.
func (s *Server) setState(state int32) {
	atomic.StoreInt32(&s.state, state)
}
// getState atomically reads the current server state; safe to call from
// any goroutine.
func (s *Server) getState() int32 {
	return atomic.LoadInt32(&s.state)
}

// initChunkChan builds the chunk-ownership channel and fills it with
// one handle per chunk id (1..size); writers take a handle to gain
// exclusive use of that chunk and return it when done.
func (s *Server) initChunkChan(size int) {
	s.chunkChan = make(chan *ChunkRepl, size)
	for id := 0; id < size; id++ {
		s.chunkChan <- NewChunkRepl(id + 1)
	}
}
// chunkChanFull reports whether every chunk handle has been returned to
// the channel, i.e. no write currently owns a chunk.
func (s *Server) chunkChanFull() bool {
	return len(s.chunkChan) == nimblestore.ChunkFileCount
}

// server main operations - New, Start, and Shutdown
// NewServer returns a zero-valued Server; it becomes usable only after
// Start has loaded the configuration and opened the storage engine.
func NewServer() *Server {
	return &Server{}
}

// Start validates the configuration, opens the log files and the
// storage engine, launches the mode-specific background work, and then
// blocks serving connections until the listener fails or the server
// shuts down.
func (s *Server) Start(cfg *config.Config) error {
	// firstly check the config values
	if e := s.checkConfig(cfg); e != nil {
		return e
	}
	// per-category log files
	if e := s.initLog(); e != nil {
		return e
	}
	// storage engine
	var e error
	s.store, e = nimblestore.NewStore(s.metadir, s.datadir)
	if e != nil {
		return e
	}

	// server stats; refuse to start on a full disk
	s.stats = NewStats(s.id)
	s.stats.FreeSpace = s.store.StatFreeSpace()
	if s.stats.FreeSpace == 0 {
		return errors.New("NoEnoughSpace")
	}

	// stand-alone mode needs no replication machinery
	if s.mode == SingleMode {
		return s.listenAndServe()
	}

	// a replica starts split until the group is assembled
	s.setState(ReplGroupSplit)

	if s.isLeader {
		// the leader owns the chunk handles and is responsible for
		// building the replication relationship with its followers
		s.initChunkChan(nimblestore.ChunkFileCount)
		go s.serveReplGroupChange()
	} else {
		// followers periodically report stats and run health checks
		go func() {
			for {
				s.ReportStats()
				s.checkLogRotation()
				s.checkDiskStatus()
				s.checkZkConnHealth()
				<-time.After(NormalWakeUpPeriod * time.Minute)
			}
		}()
	}

	// start to serve
	return s.listenAndServe()
}

// Shutdown requests a graceful stop by flipping the state flag. The
// accept loop and the per-connection loops poll this flag, so shutdown
// is not immediate: listenAndServe only notices after its next Accept
// returns.
func (s *Server) Shutdown() {
	s.setState(ShuttingDown)
}

// server core functions - listenAndServe, serveConn, handlePrimaryWrite, ...
// listenAndServe listens on the configured port and hands every
// accepted connection to its own serveConn goroutine. It returns nil
// on a requested shutdown, or the listen/accept error otherwise.
func (s *Server) listenAndServe() error {
	// firstly, listen on the port
	l, errListen := net.Listen("tcp", ":"+s.port)
	if errListen != nil {
		s.errLog.Println("Error: failed to listen ", errListen)
		return errListen
	}
	// fix: close the listener on every exit path; the original leaked
	// it when Accept returned an error
	defer l.Close()

	// then serve the connections
	for {
		conn, errAccept := l.Accept()
		if errAccept != nil {
			s.errLog.Println("Error: failed to accept ", errAccept)
			return errAccept
		}
		if s.getState() == ShuttingDown {
			// fix: the original leaked the just-accepted connection here
			conn.Close()
			return nil
		}

		// now handle the connection
		go s.serveConn(conn)
	}
}

// serveConn runs the request/reply loop for one client connection:
// read a packet, dispatch on its opcode, reply, repeat. The loop ends
// when the peer closes, an I/O error occurs, a reply cannot be
// written, or the server is shutting down.
func (s *Server) serveConn(rc net.Conn) {
	s.stats.AddConnection()
	remoteAddr := rc.RemoteAddr().String()
	s.workLog.Println("Info: accepted a connection from " + remoteAddr)

	c, ok := rc.(*net.TCPConn)
	if !ok {
		rc.Close()
		s.errLog.Println("Error: tcp conn convert failed")
		// fix: the original fell through here and dereferenced the nil
		// *net.TCPConn below; balance the stats counter and bail out
		s.stats.RemoveConnection()
		return
	}

	c.SetNoDelay(true)

	for {
		//in case the server was shut down..
		if s.getState() == ShuttingDown {
			break
		}

		req := NewPacket()
		e := req.ReadFromConn(c)
		if e != nil {
			if e == io.EOF {
				s.workLog.Println("Info: the conn was closed by the peer")
			} else {
				s.errLog.Println("Error: failed to read from the conn, ", e)
			}

			break
		}

		var errWrite error

		switch req.Opcode {
		case FullFileRead:
			errWrite = s.handleRead(req, c)

		case FullFileWrite:
			// only the leader (or a single-mode node) takes client writes
			if s.mode == ReplMode && !s.isLeader {
				s.workLog.Println("Warn: bad request from " + remoteAddr)
				break
			}

			errWrite = s.handlePrimaryWrite(req, c)

		case ReplWrite:
			// only followers take replicated writes
			if s.mode == SingleMode || s.isLeader {
				s.workLog.Println("Warn: bad request from " + remoteAddr)
				break
			}

			errWrite = s.handleReplWrite(req, c)

		case FileDelete:
			// deletion is unimplemented; reject it
			s.workLog.Println("Warn: bad request from " + remoteAddr)

		case ReplDelete:
			s.workLog.Println("Warn: bad request from " + remoteAddr)

		case Watermark:
			if s.mode == SingleMode || s.isLeader {
				s.workLog.Println("Warn: bad request from " + remoteAddr)
				break
			}

			reply := s.handleWatermark(req)
			errWrite = reply.WriteToConn(c)

		case SyncFrom:
			if s.mode == SingleMode || s.isLeader {
				// fix: log message spacing made consistent with the other cases
				s.workLog.Println("Warn: bad request from " + remoteAddr)
				break
			}

			reply := s.handleSyncFrom(req)
			errWrite = reply.WriteToConn(c)

		case SyncTo:
			if s.mode == SingleMode || s.isLeader {
				s.workLog.Println("Warn: bad request from " + remoteAddr)
				break
			}

			reply := s.handleSyncTo(req)
			errWrite = reply.WriteToConn(c)

		default:
			// fix: the original format string had no verb for the opcode
			s.workLog.Printf("Warn: unknown request cmd: %v\n", req.Opcode)
		}

		if errWrite != nil {
			s.errLog.Println("Error: have to break the conn, ", errWrite)
			break
		}
	}

	//close the connection
	c.Close()
	s.stats.RemoveConnection()
	s.workLog.Println("Info: closed the connection from " + remoteAddr)
}

// handlePrimaryWrite serves a client FullFileWrite on the primary.
// The payload is streamed in WriteDataBufSize pieces; in ReplMode each
// piece is relayed to every follower before being written locally, and
// the follower acks are collected at the end (pipelined replication).
// On any replication or disk failure the server drops to
// ReplGroupSplit and the client gets an error reply.
func (s *Server) handlePrimaryWrite(req *Packet, c *net.TCPConn) error {
	now := time.Now()

	reply := NewPacket()

	//check the data size
	totalSize := int64(req.Arglength[0])
	if totalSize <= 0 || totalSize > MaxDataSize {
		s.errLog.Printf("Error: refuse write due to invalid datasize, %v\n", totalSize)
		e := errors.New("InvalidDataSize")
		reply.PackErrorReply("InvalidDataSize")
		reply.WriteToConn(c)
		return e
	}

	//if the server isn't write-available, refuse write request
	if s.getState() != ReplGroupReady {
		s.errLog.Println("Error: refuse write due to unready replgroup")
		e := errors.New("UnreadyReplGroup")
		reply.PackErrorReply("UnreadyReplGroup")
		reply.WriteToConn(c)
		return e
	}

	//take exclusive ownership of a chunk for the duration of the write
	cr := <-s.chunkChan

	var crc uint32
	var err error

	var errPrimary error
	var errFetch error

	//the next internal offset is the chunk's current watermark
	initialOffset, errStat := s.store.GetWatermark(cr.cid)
	if errStat != nil {
		err = errors.New("PrimaryDiskIOError")
		s.fatalLog.Printf("Fatal: chunk %v failed to stat, %v\n", cr.cid, err.Error())
		s.stats.OnDiskReadError()
		//release the chunk
		s.chunkChan <- cr
		reply.PackErrorReply(err.Error())
		reply.WriteToConn(c)
		return err
	}

	//the pipelining write process
	remainingSize := totalSize
	for remainingSize > 0 {
		offset := initialOffset + (totalSize - remainingSize)
		dataSize := remainingSize
		if dataSize > WriteDataBufSize {
			dataSize = WriteDataBufSize
		}
		remainingSize -= dataSize

		errRead := req.ReadData(c, dataSize)
		if errRead != nil {
			//release the chunk
			s.chunkChan <- cr
			s.workLog.Println("Warn: failed to read data from client, ", errRead)
			reply.PackErrorReply(errRead.Error())
			reply.WriteToConn(c)
			return errRead
		}

		//let's fire it, firstly relay data to the followers
		if s.mode == ReplMode {
			for _, f := range cr.followers {
				err = f.RelayWriteCmd(req.Args[0], cr.cid, initialOffset, totalSize, offset, dataSize)
				if err != nil {
					s.errLog.Printf("Error: failed to relay write to follower %v, %v\n", f.addr, err)
					break
				}
			}

			if err != nil {
				//the group is split now: stop serving writes
				s.setState(ReplGroupSplit)
				//release the chunk
				s.chunkChan <- cr
				s.stats.OnReplGroupSplitError()
				s.errLog.Printf("Error: replgrp split on write (%v, %v, %v) %v\n", cr.cid, offset, dataSize, err)
				err = errors.New("ReplGroupSplit")
				reply.PackErrorReply(err.Error())
				reply.WriteToConn(c)
				return err
			}
		}

		errPrimary = s.store.FullFileWrite(req.Args[0], cr.cid, offset)
		if errPrimary != nil {
			s.fatalLog.Printf("Fatal: disk write error (%v, %v, %v) %v\n", cr.cid, offset, dataSize, errPrimary)
			s.stats.OnDiskWriteError()
			err = errors.New("PrimaryDiskIOError")

			//on the last piece we must still collect the follower acks
			//below, so only bail out mid-stream
			if remainingSize > 0 {
				//change server state
				s.setState(ReplGroupSplit)
				s.chunkChan <- cr
				reply.PackErrorReply(err.Error())
				reply.WriteToConn(c)
				return err
			}
		}

		crc = crc32.Update(crc, crc32.IEEETable, req.Args[0])
	}

	//fetch replies from followers
	if s.mode == ReplMode {
		for _, f := range cr.followers {
			errFetch = f.FetchWriteReply()
			if errFetch != nil {
				err = errFetch
			}
		}
	}

	if err != nil {
		s.errLog.Printf("Error: failed to handle write (%v, %v, %v) %v\n", cr.cid, initialOffset, totalSize, err)
		s.setState(ReplGroupSplit)
		s.chunkChan <- cr
		s.stats.OnReplGroupSplitError()
		err = errors.New("ReplGroupSplit")
		reply.PackErrorReply(err.Error())
		reply.WriteToConn(c)
		return err
	}

	if s.syncWrite {
		//sync the chunk for data safety
		errPrimary = s.store.SyncChunk(cr.cid)
		if errPrimary != nil {
			s.fatalLog.Printf("Fatal: disk sync write error (%v, %v, %v) %v\n",
				cr.cid, initialOffset, totalSize, errPrimary)
			//change server state
			s.setState(ReplGroupSplit)
			s.chunkChan <- cr
			s.stats.OnDiskWriteError()
			err = errors.New("PrimaryDiskIOError")
			reply.PackErrorReply(err.Error())
			reply.WriteToConn(c)
			return err
		}
	}

	// fix: release the chunk exactly once, after the optional sync. The
	// original released it before the sync AND again in the sync-error
	// path, pushing a duplicate handle into chunkChan (two writers could
	// then own the same chunk, or the extra send could block forever on
	// a full channel).
	s.chunkChan <- cr

	dt := time.Since(now)
	s.stats.OnWrite(totalSize, dt.Nanoseconds()/1e6)
	s.writeLog.Printf("[%v/%v/%v/%v], %v ms\n", cr.cid, initialOffset, totalSize, crc, dt.Nanoseconds()/1e6)

	reply.PackWriteReply(cr.cid, initialOffset, totalSize, crc)
	return reply.WriteToConn(c)
}

// handlePrimaryDelete is a stub: primary-side file deletion is not
// supported yet, so every request gets an UnImplemented error reply.
func (s *Server) handlePrimaryDelete(req *Packet) *Packet {
	s.errLog.Println("Error: unimplemented to handle primary delete")

	reply := NewPacket()
	reply.PackErrorReply("UnImplemented")
	return reply
}

// handleRead streams one stored file back to the client. The reply
// header is written first and always claims success — the client is
// expected to verify the payload via its crc — then the body is pumped
// out in ReadDataBufSize pieces (pipelined read).
func (s *Server) handleRead(req *Packet, c *net.TCPConn) error {
	start := time.Now()
	res := NewPacket()

	chunk, offset, size, err := req.ParseInternalKey()
	if err != nil {
		s.errLog.Println("Error: failed to parse the internal key " + string(req.Args[0]))
		res.PackErrorReply("BadInternalKeyError")
		return res.WriteToConn(c)
	}

	// header first; the client has to check the crc itself
	res.PackReadReply(size)
	if err = res.WriteToConn(c); err != nil {
		return err
	}

	// body: read one buffer from disk, push it to the socket, repeat
	for sent := int64(0); sent < size; {
		pieceSize := size - sent
		if pieceSize > ReadDataBufSize {
			pieceSize = ReadDataBufSize
		}

		res.Args[0], err = s.store.ReadDataToBuf(chunk, offset+sent, pieceSize, res.Args[0])
		if err != nil {
			s.fatalLog.Printf("Fatal: disk read error [%v/%v/%v], %v\n", chunk, offset+sent, pieceSize, err)
			s.stats.OnDiskReadError()
			return errors.New("DiskReadError")
		}
		sent += pieceSize

		if err = res.WriteData(c); err != nil {
			return err
		}
	}

	dt := time.Since(start)
	s.stats.OnRead(size, dt.Nanoseconds()/1e6)

	s.readLog.Printf("[%v/%v/%v], %v ms\n", chunk, offset, size, dt.Nanoseconds()/1e6)
	return nil
}

// handleReplWrite applies one relayed write piece on a follower. The
// write is accepted only when its offset equals the chunk's current
// watermark (strict append order → strong consistency), and an ack is
// sent back to the primary once the final piece of the file lands.
func (s *Server) handleReplWrite(req *Packet, c *net.TCPConn) error {
	var (
		chunk         int
		initialOffset int64
		totalSize     int64
		offset        int64
		length        int64
		curOff        int64
		err           error
	)

	//time it
	now := time.Now()

	chunk, initialOffset, totalSize, offset, length, err = req.ParseReplWriteKey()
	if err != nil {
		s.errLog.Println("Error: failed to parse the internal key ", err)
		return errors.New("BadInternalKeyError")
	}

	//check watermark to make sure a safe write
	curOff, err = s.store.GetWatermark(chunk)
	if err != nil {
		s.fatalLog.Println("Fatal: failed to get watermark", err)
		s.stats.OnDiskReadError()
		return errors.New("ReplDiskReadError")
	}

	//here we deliver strong consistency!
	if curOff != offset {
		s.errLog.Println("Error: dangerous repl write")
		return errors.New("DangerousReplWrite")
	}

	err = s.store.FullFileWrite(req.Args[1], chunk, offset)
	if err != nil {
		// fix: the original log misspelled "length" as "lenth"
		s.fatalLog.Printf("Fatal: failed to write (chunk=%v,offset=%v,length=%v)\n", chunk, offset, length)
		s.stats.OnDiskWriteError()
		return errors.New("ReplDiskWriteError")
	}

	//if the write is completed then reply to the primary
	if offset+length == initialOffset+totalSize {
		res := NewPacket()
		res.Opcode = OkReplWrite
		err = res.WriteToConn(c)
	}

	dt := time.Since(now)

	s.writeLog.Printf("[%v/%v/%v,%v/%v], %v ms\n", chunk, initialOffset, totalSize, offset, length, dt.Nanoseconds()/1e6)
	return err
}

// handleReplDelete is a stub: replicated deletion is not supported yet,
// so every request gets an UnImplemented error reply.
func (s *Server) handleReplDelete(req *Packet) *Packet {
	s.errLog.Println("Error: unimplemented to handle repl delete")

	reply := NewPacket()
	reply.PackErrorReply("UnImplemented")
	return reply
}

// handleWatermark replies with the current watermark (append offset) of
// the chunk named in the request, formatted as a decimal string.
func (s *Server) handleWatermark(req *Packet) *Packet {
	reply := NewPacket()

	chunk, errParse := strconv.Atoi(string(req.Args[0]))
	if errParse != nil {
		reply.PackErrorReply("ParseWatermarkChunkError")
		return reply
	}

	offset, errStat := s.store.GetWatermark(chunk)
	if errStat != nil {
		s.stats.OnDiskReadError()
		reply.PackErrorReply("ReplDiskReadError")
		return reply
	}

	s.infoLog.Printf("Info: handle watermark [%v/%v]\n", chunk, offset)

	// pack the successful reply
	reply.Opcode = OkWatermark
	reply.Argnum = 1
	reply.Args[0] = []byte(strconv.FormatInt(offset, 10))
	reply.Arglength[0] = uint32(len(reply.Args[0]))
	return reply
}

// handleSyncFrom reads the requested byte range from the local store so
// the primary can pull data from this follower during resync.
func (s *Server) handleSyncFrom(req *Packet) *Packet {
	reply := NewPacket()

	chunk, offset, length, errKey := req.ParseInternalKey()
	if errKey != nil {
		s.errLog.Println("Error: failed to parse the internal key " + string(req.Args[0]))
		reply.PackErrorReply("BadInternalKeyError")
		return reply
	}

	data, errRead := s.store.FullFileRead(chunk, offset, length)
	if errRead != nil {
		s.errLog.Println("Error: failed to handle syncfrom " + string(req.Args[0]) + " due to " + errRead.Error())
		s.stats.OnDiskReadError()
		reply.PackErrorReply("ReplDiskReadError")
		return reply
	}
	reply.Args[0] = data

	s.infoLog.Printf("Info: Sync to the primary, [%v/%v/%v]\n", chunk, offset, length)

	// pack the successful reply
	reply.Opcode = OkSyncFrom
	reply.Argnum = 1
	reply.Arglength[0] = uint32(len(reply.Args[0]))
	return reply
}

// handleSyncTo writes the byte range pushed by the primary into the
// local store, bringing this follower up to date during resync.
func (s *Server) handleSyncTo(req *Packet) *Packet {
	reply := NewPacket()

	chunk, offset, length, errKey := req.ParseInternalKey()
	if errKey != nil {
		s.errLog.Println("Error: failed to parse the internal key " + string(req.Args[0]))
		reply.PackErrorReply("BadInternalKeyError")
		return reply
	}

	if errWrite := s.store.FullFileWrite(req.Args[1], chunk, offset); errWrite != nil {
		s.errLog.Println("Error: failed to handle syncto " + string(req.Args[0]) + " due to " + errWrite.Error())
		s.stats.OnDiskWriteError()
		reply.PackErrorReply("ReplDiskWriteError")
		return reply
	}

	s.infoLog.Printf("Info: Sync from the primary, [%v/%v/%v]\n", chunk, offset, length)

	reply.Opcode = OkSyncTo
	return reply
}
