package shardkv

import "net"
import "fmt"
import "net/rpc"
import "time"
import "paxos"
import "sync"
import "os"
import "syscall"
import "encoding/gob"
import "math/rand"
import "shardmaster"
import "strconv"
import "strings"
import "util"

// Operation type tags recorded in the paxos log.
// Each Op carries exactly one of these in Op.OpType; ExecuteOp
// dispatches on them when applying decided log entries.
const (
	GET            = "GET"            // client read
	PUT            = "PUT"            // client write
	START_RECONFIG = "START_RECONFIG" // begin a configuration change
	END_RECONFIG   = "END_RECONFIG"   // finish a configuration change
	SEND_SHARD     = "SEND_SHARD"     // push shards this group is losing to their new owner
	RECV_SHARD     = "RECV_SHARD"     // install a shard received from another group
	HEART_BEAT     = "HEART_BEAT"     // no-op used to drive the paxos log forward
	PREPARE        = "PREPARE"        // two-phase commit: prepare (lock keys)
	COMMIT         = "COMMIT"         // two-phase commit: commit locked values
	ABORT          = "ABORT"          // two-phase commit: abort and release locks
)

// ReqR is a per-request record kept in the request cache, making
// client operations idempotent across RPC retries: a cached UID means
// the request was already applied and its result can be replayed.
type ReqR struct { // Request Record in cache
	Key   string // key the request touched (used to route records during shard moves)
	Value string // stored result (get: the value read; put: "@@"+value; 2PC: the TranID)
}

// Op is the single entry type written to the paxos log. Exactly one of
// UID (client / transaction ops) or UID_CFG (reconfiguration ops) is
// non-zero; ExecuteOp dispatches on OpType and the remaining fields
// carry that op type's payload.
type Op struct {
	// Your definitions here.
	UID     int64  // UID of get/put request
	UID_CFG string // UID of reconfig action
	OpType  string // one of the op-type constants (GET, PUT, PREPARE, ...)

	Key   string // GET/PUT: key
	Value string // PUT: value to store

	ShardNum     int // RECV_SHARD: which shard is being delivered
	Num          int // RECV_SHARD: config number the shard belongs to
	Shard        map[string]string // RECV_SHARD: the shard's key/value data
	RequestCache map[int64]ReqR // RECV_SHARD: dedup records for the moved keys

	Config     shardmaster.Config // START/END_RECONFIG: current configuration
	NextConfig shardmaster.Config // START/END_RECONFIG: configuration being moved to

	TranID int64  // PREPARE/COMMIT/ABORT: transaction id
	Pairs  []Pair // PREPARE: key/value pairs to lock
}

// ShardKV is one replica server in a paxos-replicated group of a
// sharded key/value store, extended with simple two-phase-commit
// transactions (Puts). All mutable state below is guarded by mu unless
// marked otherwise.
type ShardKV struct {
	mu         sync.Mutex
	l          net.Listener
	me         int
	dead       bool // for testing
	unreliable bool // for testing
	sm         *shardmaster.Clerk
	px         *paxos.Paxos

	gid int64 // my replica group ID

	// Your definitions here.
	inReconfig bool // true between applied START_RECONFIG and END_RECONFIG ops
	shards     [shardmaster.NShards]int64 // during reconfig: gid currently holding each shard
	config     shardmaster.Config // configuration currently being served
	nextConfig shardmaster.Config // target configuration (meaningful while inReconfig)
	insNum     int // next paxos instance number to apply

	kvTable       map[string]string // the key/value data itself
	requestCache  map[int64]ReqR    // per-client-request dedup records
	reconfigCache map[string]bool   // applied reconfig op UIDs (dedup for reconfig steps)

	ck           *Clerk2           // clerk used when acting as 2PC coordinator
	locks        map[string]string // key -> tentative value, held between PREPARE and COMMIT/ABORT
	transactions map[int64][]string // tran id -> locked keys; empty slice marks "committed"
	logger *util.Logger

	// for testing
	Pause_transaction bool // stalls Puts between prepare and commit/abort
	pause_reconfig    bool // stalls tick before sending/receiving shards
}

// Get serves a client read. Retried requests (same UID) are answered
// straight from the request cache; otherwise the read is committed to
// the paxos log via ExecuteOp and applied before replying.
func (kv *ShardKV) Get(args *GetArgs, reply *GetReply) error {
	kv.logger.Logger.Printf("Get %v\n", args.Key)
	kv.mu.Lock()
	defer kv.mu.Unlock()

	// Duplicate detection: a cached record means this read already ran.
	if cached, ok := kv.requestCache[args.UID]; ok {
		reply.Err = OK
		reply.Value = cached.Value
		return nil
	}

	// Append the read to the paxos log and wait until it is applied.
	op := &Op{OpType: GET, UID: args.UID, UID_CFG: "", Key: args.Key}
	reply.Err = kv.ExecuteOp(op)

	if reply.Err != OK {
		kv.logger.Logger.Printf("Get error: %v\n", reply.Err)
		return nil
	}

	// Sanity check: the applied op must have populated the cache.
	if _, ok := kv.requestCache[args.UID]; !ok {
		// BUG!
		fmt.Printf("ERROR: G%v S%v get op not applied!! K=%v UID=%v\n", kv.gid,
			kv.me, args.Key, args.UID)
	}
	if v, ok := kv.kvTable[args.Key]; ok {
		reply.Err = OK
		reply.Value = v
	} else {
		reply.Err = ErrNoKey
		reply.Value = ""
	}
	kv.logger.Logger.Printf("Got %v: %v\n", args.Key, reply.Value)

	return nil
}

// Put serves a client write. Retried requests (same UID) are detected
// via the request cache; otherwise the write is committed to the paxos
// log via ExecuteOp and applied before replying.
func (kv *ShardKV) Put(args *PutArgs, reply *PutReply) error {
	kv.logger.Logger.Printf("Put (%v, %v)\n", args.Key, args.Value)
	kv.mu.Lock()
	defer kv.mu.Unlock()

	// Duplicate detection: a cached record means this put already ran.
	if _, ok := kv.requestCache[args.UID]; ok {
		reply.Err = OK
		return nil
	}

	// Append the write to the paxos log and wait until it is applied.
	op := &Op{OpType: PUT, UID: args.UID, UID_CFG: "", Key: args.Key, Value: args.Value}
	reply.Err = kv.ExecuteOp(op)

	if reply.Err == OK {
		// Sanity check: the applied op must have populated the cache.
		if _, ok := kv.requestCache[args.UID]; !ok {
			// BUG!
			fmt.Printf("ERROR: G%v S%v put op not applied!! K=%v UID=%v\n", kv.gid,
				kv.me, args.Key, args.UID)
		}
	}

	return nil
}

// Puts executes a multi-key write as a two-phase-commit transaction,
// with this server acting as coordinator: prepare every participant
// group, then commit everywhere (or abort everywhere if any prepare
// failed), retrying each decision until it is acknowledged.
func (kv *ShardKV) Puts(args *PutsArgs, reply *PutsReply) error {
	kv.logger.Logger.Printf("Puts %v\n", args.Pairs)
	// act as the coordinator
	participants := kv.get_participants(args.Pairs)
	kv.logger.Logger.Printf("Participants: %v\n", participants)
	tranID := nrand()

	// Phase 1: ask every participant group to prepare.
	prepared := true
	for gid, pairs := range participants {
		if !kv.ck.Prepare(gid, tranID, pairs) {
			prepared = false
		}
	}
	kv.logger.Logger.Printf("After prepare: %v\n", prepared)
	// for testing
	kv.logger.Logger.Printf("Pause_transaction=%v\n", kv.Pause_transaction)
	for kv.Pause_transaction {
		time.Sleep(10 * time.Millisecond)
	}

	// Phase 2: broadcast the decision, retrying per group until it lands.
	var wg sync.WaitGroup
	if !prepared {
		for gid := range participants {
			wg.Add(1)
			go func(gid int64) {
				defer wg.Done()
				for {
					err := kv.ck.Abort(gid, tranID)
					if err == OK {
						return
					}
					kv.logger.Logger.Printf("%v:%v: failed to tell %v to abort trans %v (err: %v). Retrying ...\n", kv.gid, kv.me, gid, get_keys(args.Pairs), err)
					time.Sleep(10 * time.Millisecond)
				}
			}(gid)
		}
		wg.Wait()
		reply.Err = ErrPrepareError
		return nil
	}

	for gid := range participants {
		wg.Add(1)
		go func(gid int64) {
			defer wg.Done()
			for {
				if kv.ck.Commit(gid, tranID) == OK {
					return
				}
				time.Sleep(10 * time.Millisecond)
			}
		}(gid)
	}
	wg.Wait()
	reply.Err = OK
	return nil
}

// Prepare is the 2PC prepare RPC handler. It is idempotent via the
// request cache; otherwise the prepare is committed to the paxos log
// and applied before replying.
func (kv *ShardKV) Prepare(args *PrepareArgs, reply *PrepareReply) error {
	kv.mu.Lock()
	defer kv.mu.Unlock()

	// Duplicate detection: already prepared for this UID.
	if _, ok := kv.requestCache[args.UID]; ok {
		reply.Err = OK
		return nil
	}

	// Append the prepare to the paxos log and wait until it is applied.
	op := &Op{
		OpType:  PREPARE,
		UID:     args.UID,
		UID_CFG: "",
		TranID:  args.TranID,
		Pairs:   args.Pairs,
	}
	reply.Err = kv.ExecuteOp(op)
	return nil
}

// ExecutePrepare applies a PREPARE op: after verifying that every key
// is owned by this group and not already locked, it records the
// transaction's key set and locks each key with its tentative value.
func (kv *ShardKV) ExecutePrepare(op *Op) Err {
	if kv.inReconfig {
		return ErrInReconfig
	}

	// Validate first; refuse the whole prepare on any conflict.
	for _, pair := range op.Pairs {
		if kv.config.Shards[key2shard(pair.Key)] != kv.gid {
			return ErrWrongGroup
		}
		if _, locked := kv.locks[pair.Key]; locked {
			return ErrLocked
		}
	}

	// Lock the keys; the tentative values live in kv.locks until
	// commit/abort.
	kv.transactions[op.TranID] = get_keys(op.Pairs)
	for _, pair := range op.Pairs {
		kv.locks[pair.Key] = pair.Value
	}
	kv.requestCache[op.UID] = ReqR{fmt.Sprintf("%v", op.TranID), ""}
	return OK
}

// Commit is the 2PC commit RPC handler. It is idempotent via the
// request cache; otherwise the commit is committed to the paxos log
// and applied before replying.
func (kv *ShardKV) Commit(args *PrepareArgs, reply *PrepareReply) error {
	kv.mu.Lock()
	defer kv.mu.Unlock()

	// Duplicate detection: already committed for this UID.
	if _, ok := kv.requestCache[args.UID]; ok {
		reply.Err = OK
		return nil
	}

	// Append the commit to the paxos log and wait until it is applied.
	op := &Op{
		OpType:  COMMIT,
		UID:     args.UID,
		UID_CFG: "",
		TranID:  args.TranID,
	}
	reply.Err = kv.ExecuteOp(op)
	return nil
}

// ExecuteCommit applies a COMMIT op: it moves every locked tentative
// value of the transaction into the kv table and releases the locks.
// A transaction whose key list is empty has already been committed,
// which makes commit idempotent.
func (kv *ShardKV) ExecuteCommit(op *Op) Err {
	if kv.inReconfig {
		return ErrInReconfig
	}

	keys, exists := kv.transactions[op.TranID]
	if !exists {
		return ErrNoTransaction
	}
	if len(keys) == 0 {
		// this transaction has been committed
		return OK
	}

	// Validate before mutating: every key must still be owned here and
	// still hold its prepare-time lock.
	for _, key := range keys {
		if kv.config.Shards[key2shard(key)] != kv.gid {
			return ErrWrongGroup
		}
		if _, exists := kv.locks[key]; !exists {
			return ErrNotLocked
		}
	}

	// Install the tentative values and release the locks.
	for _, key := range keys {
		kv.kvTable[key] = kv.locks[key]
		delete(kv.locks, key)
	}
	// to mark that this transaction has been committed
	kv.transactions[op.TranID] = make([]string, 0)
	kv.requestCache[op.UID] = ReqR{fmt.Sprintf("%v", op.TranID), ""}
	return OK
}

// Abort is the 2PC abort RPC handler. It is idempotent via the request
// cache; otherwise the abort is committed to the paxos log and applied
// before replying.
func (kv *ShardKV) Abort(args *PrepareArgs, reply *PrepareReply) error {
	kv.mu.Lock()
	defer kv.mu.Unlock()

	// Duplicate detection: already aborted for this UID.
	if _, ok := kv.requestCache[args.UID]; ok {
		reply.Err = OK
		return nil
	}

	// Append the abort to the paxos log and wait until it is applied.
	op := &Op{
		OpType:  ABORT,
		UID:     args.UID,
		UID_CFG: "",
		TranID:  args.TranID,
	}
	reply.Err = kv.ExecuteOp(op)
	return nil
}

// ExecuteAbort applies an ABORT op: it discards the tentative values by
// releasing the transaction's locks without touching the kv table.
// Aborting an unknown transaction succeeds (the prepare may never have
// reached this group).
func (kv *ShardKV) ExecuteAbort(op *Op) Err {
	if kv.inReconfig {
//		kv.logger.Logger.Println("Abort: In Recofing")
		return ErrInReconfig
	}

	keys, exists := kv.transactions[op.TranID]
	if !exists {
		// maybe didn't prepare at all, ok to abort
		return OK
	}

	// Validate ownership before releasing any lock.
	for _, key := range keys {
		if kv.config.Shards[key2shard(key)] != kv.gid {
			kv.logger.Logger.Println("Abort: Wrong group")
			return ErrWrongGroup
		}
	}

	// Drop the locks; tentative values are thrown away.
	for _, key := range keys {
		delete(kv.locks, key)
	}
	kv.requestCache[op.UID] = ReqR{fmt.Sprintf("%v", op.TranID), ""}
	return OK
}

// Transaction / reconfiguration error codes, in addition to the Err
// values declared elsewhere in this package (OK, ErrNoKey, ...).
const (
	ErrPrepareError  = "PrepareError" // some participant refused to prepare
	ErrLocked        = "Locked" // key is locked by an in-flight transaction
	ErrNotLocked     = "NotLocked" // commit found a key without its prepare lock
	ErrNoTransaction = "NoSuchTransaction" // commit for an unknown transaction id
	ErrTesting       = "ForTestingPurpose"
	ErrInReconfig    = "InReconfig" // op refused while a reconfiguration is in progress
)

// get_participants groups a transaction's pairs by the replica group
// that owns each key's shard under the current configuration.
func (kv *ShardKV) get_participants(pairs []Pair) map[int64][]Pair {
	byGroup := make(map[int64][]Pair)
	for _, pair := range pairs {
		owner := kv.config.Shards[key2shard(pair.Key)]
		// append on a missing key starts from a nil slice, so no
		// explicit initialization is needed.
		byGroup[owner] = append(byGroup[owner], pair)
	}
	return byGroup
}

// get_keys returns the keys of the given pairs, preserving order.
func get_keys(pairs []Pair) []string {
	keys := make([]string, 0, len(pairs))
	for _, pair := range pairs {
		keys = append(keys, pair.Key)
	}
	return keys
}

// PrepareArgs carries the 2PC RPC arguments, shared by the Prepare,
// Commit and Abort handlers; Pairs is only meaningful for Prepare.
type PrepareArgs struct {
	UID    int64 // unique request id, for duplicate detection
	TranID int64 // transaction id chosen by the coordinator
	Pairs  []Pair // key/value pairs to lock (Prepare only)
}

// PrepareReply is the reply for the Prepare, Commit and Abort RPCs.
type PrepareReply struct {
	Err Err
}

// Receive is the RPC handler through which another group pushes one
// shard (plus the dedup records for that shard's keys) during a
// reconfiguration. The install is funneled through the paxos log as a
// RECV_SHARD op; ErrNotReady tells the sender to retry until this
// replica has itself entered the matching reconfiguration.
func (kv *ShardKV) Receive(args *ReceiveArgs, reply *ReceiveReply) error {

	kv.mu.Lock()
	defer kv.mu.Unlock()

	// if in the cache, directly reply
	_, exists := kv.reconfigCache[args.UID_CFG]
	if exists {
		reply.Err = OK
		return nil
	}

	// write this op to the paxos log and wait this op is done
	recvOp := &Op{}
	recvOp.OpType = RECV_SHARD
	recvOp.UID_CFG = args.UID_CFG
	recvOp.Num = args.Num
	recvOp.ShardNum = args.ShardNum
	recvOp.Shard = args.Shard
	recvOp.RequestCache = args.RequestCache
	kv.ExecuteOp(recvOp)

	// if "not ready": either not reconfiguring yet, or still behind the
	// sender's configuration — the sender will retry later.
	if kv.inReconfig == false || kv.config.Num < args.Num {
		reply.Err = ErrNotReady
		return nil
	}

	// re-check the configuration num is correct
	if kv.config.Num != recvOp.Num {
		// BUG!
		fmt.Printf("ERROR: G%v S%v recv op not applied!! Op.Num=%v CFG#=%v\n", kv.gid,
			kv.me, recvOp.Num, kv.config.Num)
		reply.Err = ErrNotReady
		return nil
	}

	reply.Err = OK

	return nil
}

// Reconfiguration helpers: each logs one step of the protocol
// (start / end / send) through the paxos log, deduplicated by
// the step's UID_CFG.
// StartReconfig logs a START_RECONFIG op for the transition
// config -> newConfig, unless that step has already been applied.
func (kv *ShardKV) StartReconfig(config shardmaster.Config,
	newConfig shardmaster.Config) {
	kv.mu.Lock()
	defer kv.mu.Unlock()

	op := Op{
		OpType:     START_RECONFIG,
		UID_CFG:    START_RECONFIG + "#" + strconv.Itoa(newConfig.Num),
		Config:     config,
		NextConfig: newConfig,
	}

	// Skip if this reconfig step was already executed.
	if _, done := kv.reconfigCache[op.UID_CFG]; done {
		return
	}

	kv.ExecuteOp(&op)
}

// EndReconfig logs an END_RECONFIG op for the transition
// config -> newConfig, unless that step has already been applied.
func (kv *ShardKV) EndReconfig(config shardmaster.Config,
	newConfig shardmaster.Config) {
	kv.mu.Lock()
	defer kv.mu.Unlock()

	op := Op{
		OpType:     END_RECONFIG,
		UID_CFG:    END_RECONFIG + "#" + strconv.Itoa(newConfig.Num),
		Config:     config,
		NextConfig: newConfig,
	}

	// Skip if this reconfig step was already executed.
	if _, done := kv.reconfigCache[op.UID_CFG]; done {
		return
	}

	kv.ExecuteOp(&op)
}

// StartSend logs a SEND_SHARD op for the transition to newConfig,
// unless that step has already been applied.
func (kv *ShardKV) StartSend(config shardmaster.Config,
	newConfig shardmaster.Config) {
	kv.mu.Lock()
	defer kv.mu.Unlock()

	op := &Op{
		OpType:  SEND_SHARD,
		UID_CFG: SEND_SHARD + "#" + strconv.Itoa(newConfig.Num),
	}

	// Skip if this reconfig step was already executed.
	if _, done := kv.reconfigCache[op.UID_CFG]; done {
		return
	}

	kv.ExecuteOp(op)
}

// WaitRecv blocks until this group has received every shard it gains in
// newConfig (tracked in kv.shards), periodically appending HEART_BEAT
// ops so pending RECV_SHARD log entries get applied while waiting.
// NOTE(review): kv.inReconfig is read here without holding kv.mu —
// looks racy; confirm this early-out is intentionally best-effort.
func (kv *ShardKV) WaitRecv(config shardmaster.Config,
	newConfig shardmaster.Config) {
	//fmt.Printf("G%v S%v start to wait for recv ops. %v -> %v \n",
	//            kv.gid, kv.me, config.Num, newConfig.Num)

	if kv.inReconfig == false {
		return
	}

	// check kv.config or kv.shards
	for {
		kv.mu.Lock()
		kv.logger.Logger.Println("Still in for")

		// special case when recfg 0 -> 1
		if kv.config.Num == 0 {
			kv.mu.Unlock()
			break
		}

		// already advanced to future configs
		if kv.config.Num > config.Num {
			kv.mu.Unlock()
			break
		}

		// gets all shards specifiedin newConfig?
		ok := true
		for i := 0; i < shardmaster.NShards; i++ {
			if newConfig.Shards[i] == kv.gid &&
				kv.shards[i] != kv.gid {
				ok = false
			}
		}
		if ok {
			kv.mu.Unlock()
			break
		}

		// Not all shards have arrived; drive the paxos log forward so
		// any decided RECV_SHARD ops get applied, then wait and retry.
		beatOp := &Op{}
		beatOp.UID = nrand()
		beatOp.OpType = HEART_BEAT
		kv.ExecuteOp(beatOp)

		kv.mu.Unlock()

		time.Sleep(1000 * time.Millisecond)
	}

	//fmt.Printf("G%v S%v finish wait for recv ops. %v -> %v \n",
	//            kv.gid, kv.me, config.Num, newConfig.Num)
}

// ExecuteOp proposes op for the next paxos instance and applies decided
// log entries in order until op itself has been applied, returning its
// result. If another op wins an instance, that op is applied and ours
// is re-proposed at the next instance number. Caller must hold kv.mu.
func (kv *ShardKV) ExecuteOp(op *Op) (ret Err) {
	for {
		kv.px.Start(kv.insNum, *op)

		backoff := 10 * time.Millisecond
		for {
			decided, res := kv.px.Status(kv.insNum)
			if !decided {
				// Poll with exponential backoff, capped at 10s.
				time.Sleep(backoff)
				if backoff < 10*time.Second {
					backoff *= 2
				}
				continue
			}

			kv.insNum += 1
			applied := res.(Op)
			kv.logger.Logger.Printf("Executing: %v: %v\n", kv.insNum, applied)
			if applied.UID_CFG == "" {
				// Client / transaction ops.
				switch applied.OpType {
				case GET:
					ret = kv.ExecuteGet(&applied)
				case PUT:
					ret = kv.ExecutePut(&applied)
				case HEART_BEAT:
					// no state change
				case PREPARE:
					ret = kv.ExecutePrepare(&applied)
				case COMMIT:
					ret = kv.ExecuteCommit(&applied)
				case ABORT:
					ret = kv.ExecuteAbort(&applied)
				}
				if op.UID_CFG == "" && op.UID == applied.UID {
					return
				}
			} else {
				// Reconfiguration ops.
				switch applied.OpType {
				case START_RECONFIG:
					kv.ExecuteStart(&applied)
				case END_RECONFIG:
					kv.ExecuteEnd(&applied)
				case SEND_SHARD:
					kv.ExecuteSend(&applied)
				case RECV_SHARD:
					kv.ExecuteRecv(&applied)
				}
				if op.UID_CFG != "" && op.UID_CFG == applied.UID_CFG {
					return
				}
			}
			// Decided op was someone else's: re-propose ours at the
			// next instance.
			break
		}
	}
}

// ExecuteStart applies a START_RECONFIG op: enters reconfiguration
// mode, remembers the target configuration, and snapshots current
// shard ownership into kv.shards. Refused while any transaction lock
// is held, so reconfiguration never races with an in-flight 2PC.
func (kv *ShardKV) ExecuteStart(op *Op) {
	//fmt.Printf("G%v S%v execute start cfg#%v->%v %v %v\n", kv.gid,
	//    kv.me, op.Config.Num, op.NextConfig.Num, op.Config.Shards, op.NextConfig.Shards)

	// if in a transaction
	if len(kv.locks) > 0 {
		return
	}

	if op.Config.Num != kv.config.Num {
		fmt.Printf("ERROR: G%v S%v configs do not match!!\n%v\n%v\n", kv.gid,
			kv.me, kv.config, op.Config)
		return
	}

	kv.inReconfig = true
	kv.nextConfig = op.NextConfig
	kv.shards = kv.config.Shards
	kv.reconfigCache[op.UID_CFG] = true
	//	fmt.Printf("%v:%v reconfig %v->%v started\n", kv.gid, kv.me, kv.config.Num, kv.nextConfig.Num)
}

// ExecuteEnd applies an END_RECONFIG op: leaves reconfiguration mode
// and switches the served configuration to the one prepared by
// ExecuteStart. No-op if not currently reconfiguring.
func (kv *ShardKV) ExecuteEnd(op *Op) {
	//fmt.Printf("G%v S%v execute end cfg#%v->%v UID=%v\n", kv.gid,
	//    kv.me, op.Config.Num, op.NextConfig.Num, op.UID_CFG)

	if kv.inReconfig == false {
		return
	}

	if op.Config.Num != kv.config.Num {
		fmt.Printf("ERROR: G%v S%v configs do not match!!\n%v\n%v\n", kv.gid,
			kv.me, kv.config, op.Config)
		return
	}

	kv.inReconfig = false
	kv.config = kv.nextConfig
	kv.reconfigCache[op.UID_CFG] = true
	//	fmt.Printf("%v:%v reconfig %v->%v ended\n", kv.gid, kv.me, op.Config.Num, kv.nextConfig.Num)
}

// ExecuteRecv applies a RECV_SHARD op: after checking that we are in
// the matching reconfiguration and are the shard's new owner, it merges
// the shard's key/value pairs and dedup records into local state and
// marks the shard as present in kv.shards.
func (kv *ShardKV) ExecuteRecv(op *Op) {
	//fmt.Printf("G%v S%v execute recv op cfg#%v shard#%v UID=%v", kv.gid,
	//    kv.me, op.Num, op.ShardNum, op.UID_CFG)

	// Not ready yet: not reconfiguring, or the sender is ahead of us.
	if kv.inReconfig == false || op.Num > kv.config.Num {
		//fmt.Printf("\nERROR: G%v S%v tries to receive when not in reconfig!!\n%v\n%v\n", kv.gid,
		//            kv.me, kv.config, kv.nextConfig)
		//fmt.Printf("\tnot ready\n")
		return
	}

	// Stale delivery from an older configuration: ignore.
	if op.Num < kv.config.Num {
		//fmt.Printf("\nERROR: G%v S%v config number do not match in recv!!\n%v\n%v\n", kv.gid,
		//            kv.me, op.Num, kv.nextConfig)
		return
	}

	// Sanity check: the shard must actually be moving to this group.
	sid := op.ShardNum
	if kv.config.Shards[sid] == kv.nextConfig.Shards[sid] || kv.nextConfig.Shards[sid] != kv.gid {
		fmt.Printf("\nERROR: G%v S%v receives wrong shard # %v!!\n%v\n%v\n", kv.gid,
			kv.me, sid, kv.config, kv.nextConfig)
		return
	}
	for key, value := range op.Shard {
		kv.kvTable[key] = value
	}
	for uid, record := range op.RequestCache {
		kv.requestCache[uid] = record
	}
	kv.shards[sid] = kv.gid
	kv.reconfigCache[op.UID_CFG] = true
	//fmt.Printf("\t%v %v %v\n", kv.config.Shards, kv.shards, kv.nextConfig.Shards)
}

// ExecuteSend applies a SEND_SHARD op. For every shard owned under the
// current configuration but not under the next one, it snapshots the
// shard's key/value pairs plus the request-cache entries for those
// keys, hands the snapshot to a background goroutine that keeps
// offering it to the new owner group until one replica accepts, and
// then deletes the moved data locally.
func (kv *ShardKV) ExecuteSend(op *Op) {
	if kv.inReconfig == false {
		// Not in a reconfiguration; nothing to hand off.
		return
	}

	for i := 0; i < shardmaster.NShards; i++ {
		if kv.config.Shards[i] == kv.gid && kv.nextConfig.Shards[i] != kv.gid {
			// Build the transfer payload for shard i.
			recvArgs := &ReceiveArgs{}
			recvArgs.ShardNum = i
			recvArgs.Num = kv.config.Num
			recvArgs.UID_CFG = RECV_SHARD + "#" + strconv.Itoa(kv.nextConfig.Num) + "#" + strconv.Itoa(i)

			recvArgs.Shard = map[string]string{}
			recvArgs.RequestCache = map[int64]ReqR{}
			for key, value := range kv.kvTable {
				if key2shard(key) == i {
					recvArgs.Shard[key] = value
				}
			}
			for uid, record := range kv.requestCache {
				if key2shard(record.Key) == i {
					recvArgs.RequestCache[uid] = record
				}
			}

			toGid := kv.nextConfig.Shards[i]
			servers, exists := kv.nextConfig.Groups[toGid]
			if exists == false {
				// Bug fix: the format string has five %v verbs but only
				// four arguments were supplied — toGid was missing.
				fmt.Printf("ERROR: G%v S%v GID %v not in the next config!!\n%v\n%v\n", kv.gid,
					kv.me, toGid, kv.config, kv.nextConfig)
			} else {
				// Retry in the background until some replica of the new
				// owner group acknowledges the shard.
				go func(servers []string, args *ReceiveArgs) {
					for {
						done := false
						for _, server := range servers {
							var recvReply ReceiveReply
							ok := call(server, "ShardKV.Receive", args, &recvReply)
							if ok && recvReply.Err == OK {
								done = true
								break
							}
						}
						if done {
							break
						}
						time.Sleep(100 * time.Millisecond)
					}
				}(servers, recvArgs)
			}

			// Drop the moved keys and their dedup records locally; the
			// snapshot captured above is what gets delivered.
			for key := range recvArgs.Shard {
				delete(kv.kvTable, key)
			}
			for uid := range recvArgs.RequestCache {
				delete(kv.requestCache, uid)
			}
		}
	}
	kv.reconfigCache[op.UID_CFG] = true
}

// ExecuteGet applies a GET op: after ownership and lock checks, it
// records the value read (empty string when absent) in the request
// cache so retries of the same UID replay the same answer.
func (kv *ShardKV) ExecuteGet(op *Op) Err {
	if kv.inReconfig {
		return ErrNotReady
	}
	if kv.config.Shards[key2shard(op.Key)] != kv.gid {
		return ErrWrongGroup
	}
	if _, locked := kv.locks[op.Key]; locked {
		return ErrLocked
	}

	// Map zero value "" stands in for a missing key.
	value := kv.kvTable[op.Key]
	kv.requestCache[op.UID] = ReqR{op.Key, value}
	return OK
}

// ExecutePut applies a PUT op: after ownership and lock checks, it
// stores the value and records the request in the dedup cache.
func (kv *ShardKV) ExecutePut(op *Op) Err {
	// reply "not ready" if in reconfig
	if kv.inReconfig {
		return ErrNotReady
	}
	// reply "wrong group" if shard is not here
	if kv.config.Shards[key2shard(op.Key)] != kv.gid {
		return ErrWrongGroup
	}
	if _, locked := kv.locks[op.Key]; locked {
		return ErrLocked
	}

	kv.kvTable[op.Key] = op.Value
	// NOTE(review): the "@@" prefix presumably marks the record as a
	// put (vs. a get result) — confirm against the callers.
	kv.requestCache[op.UID] = ReqR{op.Key, "@@" + op.Value}
	return OK
}

//
// Ask the shardmaster if there's a new configuration;
// if so, re-configure.
//
// tick polls the shardmaster for the next configuration and, when one
// exists, drives the reconfiguration protocol: START_RECONFIG, then
// either send or wait-to-receive shards, then END_RECONFIG.
// NOTE(review): this code assumes a group never both sends and receives
// in one step, and reads kv.inReconfig without kv.mu — confirm both.
func (kv *ShardKV) tick() {
//	kv.logger.Logger.Printf("%v:%v tick()\n", kv.gid, kv.me)
	var config shardmaster.Config
	var newConfig shardmaster.Config

	kv.mu.Lock()
	config = kv.config
	kv.mu.Unlock()

	//    if config.Num == 0 {
	//        newConfig = kv.sm.Query(-1)
	//    } else {
	newConfig = kv.sm.Query(config.Num + 1)
	//    }

	// No newer configuration: nothing to do.
	if config.Num == newConfig.Num {
		return
	}

	kv.logger.Logger.Printf("%v -> %v\n", config.Num, newConfig.Num)
	// figure out whether this group is sending shards or receiving shards
	bSend := false
	bRecv := false
	for i := 0; i < shardmaster.NShards; i++ {
		if newConfig.Shards[i] != config.Shards[i] {
			if config.Shards[i] == kv.gid {
				bSend = true
			}
			if newConfig.Shards[i] == kv.gid {
				bRecv = true
			}
		}
	}

	if bSend && bRecv {
		fmt.Printf("ERROR: G%v S%v is both receiving and sending!!!\n",
			kv.gid, kv.me)
		return
	}

	// start reconfig
	kv.StartReconfig(config, newConfig)

	// StartReconfig may have been refused (e.g. a transaction holds
	// locks); in that case retry on a later tick.
	if kv.inReconfig == false {
		return
	}

	// for testing: stall between start and the shard transfer
	for kv.pause_reconfig {
		time.Sleep(10 * time.Millisecond)
	}

	// this group is sending shards
	if bSend {
		//		fmt.Printf("%v:%v reconfig %v->%v sending\n", kv.gid, kv.me, config.Num, newConfig.Num)
		kv.StartSend(config, newConfig)
		//		fmt.Printf("%v:%v reconfig %v->%v sent\n", kv.gid, kv.me, config.Num, newConfig.Num)
	}

	// this group is receiving shards
	if bRecv {
		kv.logger.Logger.Printf("%v:%v reconfig %v->%v receiving\n", kv.gid, kv.me, config.Num, newConfig.Num)
		kv.WaitRecv(config, newConfig)
		kv.logger.Logger.Printf("%v:%v reconfig %v->%v received\n", kv.gid, kv.me, config.Num, newConfig.Num)
	}

	// end reconfig
	kv.EndReconfig(config, newConfig)
}

// Alive reports liveness; kill (below) tells the server to shut itself down.
// Alive reports whether this server has not been killed.
// (Reformatted: the original was not gofmt-clean — missing space before
// the brace and space-indented body.)
func (kv *ShardKV) Alive() bool {
	return !kv.dead
}
// kill shuts the server down: marks it dead so background loops exit,
// closes the RPC listener, and kills the underlying paxos peer.
func (kv *ShardKV) kill() {
	kv.dead = true
	kv.l.Close()
	kv.px.Kill()
}

// BoolArgs wraps a single boolean RPC argument (testing hooks).
type BoolArgs struct {
	Value bool
}

// SetPauseTransaction toggles the testing flag that stalls Puts
// between the prepare phase and the commit/abort phase.
func (kv *ShardKV) SetPauseTransaction(args *BoolArgs, reply *PutReply) error {
	kv.logger.Logger.Printf("SetPauseTransaction %v\n", args.Value)
	kv.Pause_transaction = args.Value
	reply.Err = OK
	return nil
}

//
// Start a shardkv server.
// gid is the ID of the server's replica group.
// shardmasters[] contains the ports of the
//   servers that implement the shardmaster.
// servers[] contains the ports of the servers
//   in this replica group.
// me is the index of this server in servers[].
//
/*
func (kv *ShardKV) GetConn() (bool, []string) {
	return kv.px.GetConn()
}
*/
// SetGid overrides this server's replica group ID (testing hook).
// (Reformatted: the original body was space-indented, not gofmt-clean.)
func (kv *ShardKV) SetGid(gg int64) {
	kv.gid = gg
}
// SetTag forwards a tag to the underlying paxos peer (testing hook).
func (kv *ShardKV) SetTag(tag string) {
	kv.px.SetTag(tag)
}

// CleanDir removes this server's log file and the paxos peer's on-disk
// state. (Reformatted: one line was space-indented, not gofmt-clean.)
func (kv *ShardKV) CleanDir() {
	kv.logger.Remove()
	kv.px.CleanDir()
}
// StartServer creates and starts one ShardKV replica. gid is this
// group's id, shardmasters are the shardmaster replica addresses,
// servers are this group's replica addresses, and me is this server's
// index in servers.
func StartServer(gid int64, shardmasters []string,
	servers []string, me int) *ShardKV {
	// Register concrete types that travel through gob (paxos log / RPC).
	gob.Register(ReqR{})
	gob.Register(shardmaster.Config{})
	gob.Register(Op{})

	kv := new(ShardKV)
	kv.me = me
	kv.gid = gid
	kv.sm = shardmaster.MakeClerk(shardmasters)

	// Your initialization code here.
	// Don't call Join().
	kv.unreliable = false
	kv.kvTable = map[string]string{}
	kv.requestCache = map[int64]ReqR{}
	kv.reconfigCache = map[string]bool{}

	kv.ck = MakeClerk2(shardmasters)
	kv.locks = map[string]string{}
	kv.transactions = map[int64][]string{}

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)

	kv.logger = util.NewLogger(servers[me] + ".kv", false)

	var l net.Listener
	var e error

	// Listen on TCP when the address has a host:port form, otherwise
	// on a unix-domain socket file.
    dotcp := strings.Contains(servers[me], ":")
	if dotcp {
		kv.logger.Logger.Println("Listening to TCP ", servers[me])
		l, e = net.Listen("tcp", servers[me])
	} else {
		kv.logger.Logger.Println("Listening to File ", servers[me])
		os.Remove(servers[me])
		l, e = net.Listen("unix", servers[me])
	}

	if e != nil {
		kv.logger.Logger.Fatalf("listen error: ", e)
	}
	kv.l = l


	// please do not change any of the following code,
	// or do anything to subvert it.

	// Accept loop: serves RPCs, simulating dropped requests and dropped
	// replies when kv.unreliable is set (testing).
	go func() {
		for kv.dead == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.dead == false {
				if kv.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					var f *os.File
					if dotcp {
						c1 := conn.(*net.TCPConn)
						f, _ = c1.File()
					} else {
						c1 := conn.(*net.UnixConn)
						f, _ = c1.File()
					}
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.dead == false {
				fmt.Printf("ShardKV(%v) accept: %v\n", me, err.Error())
				kv.kill()
			}
		}
	}()

	// Reconfiguration poller.
	go func() {
		for kv.dead == false {
			kv.tick()
			time.Sleep(250 * time.Millisecond)
		}
	}()

	return kv
}
