package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"time"

	"gitee.com/czy233/go-ceph/rados"
)

// Server is the HTTP handler for one storage cluster. It serves object
// reads/writes against a ceph pool and cross-checks stored objects
// against hashes fetched from the fabric SDK.
type Server struct {
	ioctx *rados.IOContext // open IO context on the ceph pool
	conn  *rados.Conn      // underlying ceph connection (held so it can be shut down)
	time  int              // incremented once per successful /check request
	// From    int
	cli     *http.Client // shared client for fabric SDK and remote-cluster requests
	recover *chan Recover // queue of objects needing recovery; NOTE(review): pointer-to-channel — a plain chan would suffice, confirm callers
}

// Recover identifies an object that failed a read/check and the block
// height at which the failure was detected, so it can be re-fetched.
type Recover struct {
	ObjId       string // ceph object ID to recover
	BlockHeight uint64 // block height associated with the failed check
}

// ObjectBlock is the JSON payload of a /put request: one object ID plus
// its full content.
type ObjectBlock struct {
	Obj_id  string `json:"obj"`
	Content []byte `json:"content"`
}

// DataWithProof is the JSON response of a /check request: the requested
// data slice together with a Merkle proof over the containing object.
type DataWithProof struct {
	Data  []byte      `json:"data"`
	Proof MerkleProof `json:"proof"`
}

// ServeHTTP dispatches object-storage requests by URL path:
//
//	/get               read one full object from the local ceph pool
//	/put               write one full object (with hash) into the local ceph pool
//	/check             read an object and return a 4K page plus a Merkle proof
//	/testReadLocalObj  benchmark reading up to 10 local objects
//	/testReadRemoteObj benchmark reading up to 10 objects from remote clusters
//
// Fixes over the previous version: the hash-fetching goroutines deferred
// resp.Body.Close() before checking s.cli.Get's error (nil-pointer panic on
// any transport failure), and their result channel was unbuffered, so an
// early return from the handler leaked the goroutine forever.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	switch r.URL.Path {
	case "/get":
		oid := r.FormValue("oid")
		data := make([]byte, ObjectSize)
		// No cross-cluster fault tolerance on this path: a read error is fatal
		// for the request.
		dlen, err := s.ioctx.Read(oid, data, 0)
		if err != nil {
			fmt.Println("cluster [", *ClusterID, "] ", "get obj from ceph error", oid, err)
			return
		}
		if dlen != ObjectSize {
			fmt.Println("cluster [", *ClusterID, "] ", "get obj from ceph error, length error", oid, err)
			return
		}
		if _, err = w.Write(data); err != nil {
			fmt.Println("cluster", *ClusterID, r.URL.Path, "write to client error:", err)
		}

	case "/put":
		var obj ObjectBlock
		if err := json.NewDecoder(r.Body).Decode(&obj); err != nil {
			return
		}
		if err := s.ioctx.WriteFullWithPutHash(obj.Obj_id, obj.Content); err != nil {
			fmt.Println("error when read obj:", err)
			return
		}
		// Reply with the number of bytes accepted.
		_, _ = w.Write([]byte(strconv.Itoa(len(obj.Content))))

	case "/check":
		oid := r.FormValue("oid")
		offset, _ := strconv.Atoi(r.FormValue("off")) // 4K page index inside the object, in [0, 1024)
		height, _ := strconv.ParseUint(r.FormValue("height"), 10, 64)
		data := make([]byte, ObjectSize)
		// This path has cross-cluster fault tolerance: a failed read enqueues
		// the object for recovery instead of just erroring out.
		dlen, err := s.ioctx.Read(oid, data, 0)
		if err != nil {
			// LoadOrStore reports whether a recovery was already queued, so
			// each object is enqueued at most once.
			_, objIsRecovering := recoveringObjsMap.LoadOrStore(oid, true)
			if !objIsRecovering {
				*s.recover <- Recover{ObjId: oid, BlockHeight: height}
				fmt.Println("cluster [", *ClusterID, "] ", "check data err, obj ID:", oid, ", Height:", height, ", err:", err)
			} else {
				fmt.Println("cluster [", *ClusterID, "] ", "check data err, recovering obj ID:", oid, ", Height: ", height, ", err:", err)
			}
			return
		}
		if dlen != ObjectSize {
			fmt.Println("cluster [", *ClusterID, "] ", "check data length err, recovering obj ID:", oid, ", Height: ", height, ", err:", err)
			// *s.recover <- Recover{ObjId: oid, BlockHeight: height}
			return
		}
		var dataWithProof *DataWithProof
		if ObjectSize == 4*1024*1024 {
			// Full-size object: build a 1/32/32 Merkle tree over the 4 MiB and
			// prove the requested 4K page.
			var m MerkleTree
			m.InitConfig([]int{1, 32, 32})
			if err = m.GenerateTree(data); err != nil {
				fmt.Println("cluster [", *ClusterID, "] ", "generate mktree error", "oid", oid, "offset", offset)
				return
			}
			proof, err := m.GenerateMerkleProof(offset)
			if err != nil {
				fmt.Println("cluster [", *ClusterID, "] ", "generate mktree proof error", "oid", oid, "offset", offset)
				return
			}
			dataWithProof = &DataWithProof{
				Data:  data[offset<<12 : (offset+1)<<12], // offset<<12 == offset * 4096
				Proof: proof,
			}
		} else {
			// Small object: a single sha256 acts as a degenerate one-node proof.
			hash := sha256.Sum256(data)
			dataWithProof = &DataWithProof{
				Data: data,
				Proof: MerkleProof{
					Fork:  []int{1},
					Proof: [][]Sha256{{hash[:]}},
				},
			}
		}

		dataWithProofByte, err := json.Marshal(*dataWithProof)
		if err != nil {
			fmt.Println("cluster [", *ClusterID, "] ", "json.Marshal error", "oid", oid, "offset", offset)
			return
		}
		_, _ = w.Write(dataWithProofByte)
		// NOTE(review): s.time is mutated without synchronization although the
		// http server may call handlers concurrently — confirm acceptable.
		s.time++

	case "/testReadLocalObj":
		withHash := r.FormValue("withHash")
		iter, err := s.ioctx.Iter()
		if err != nil {
			fmt.Println("cluster [", *ClusterID, "] ", "error when create iterator:", err)
			return
		}
		defer iter.Close()
		count := int64(0)
		start := time.Now()
		// Sample at most 10 objects from the local pool.
		for iter.Next() && count < 10 {
			oid := iter.Value()
			data := make([]byte, ObjectSize)
			// Buffered so the fetch goroutine can always deliver and exit even
			// if we return early below (previously an unbuffered channel leaked it).
			hashFromFabricChan := make(chan []byte, 1)
			if withHash == "true" {
				go s.fetchHashFromFabric(*ClusterID, oid, hashFromFabricChan)
			}
			dlen, err := s.ioctx.Read(oid, data, 0)
			if err != nil {
				fmt.Println("cluster [", *ClusterID, "] ", "read obj from ceph error", oid, err)
				return
			}
			if dlen != ObjectSize {
				fmt.Println("cluster [", *ClusterID, "] ", "read obj from ceph length error", oid, err)
				return
			}
			if withHash == "true" {
				var m MerkleTree
				m.InitConfig([]int{1, 32, 32})
				if err = m.GenerateTree(data); err != nil {
					fmt.Println("cluster [", *ClusterID, "] ", "generate mktree error", "oid", oid)
				}
				hashFromFabric := <-hashFromFabricChan
				if hashFromFabric == nil {
					fmt.Println("cluster [", *ClusterID, "] ", "read obj from fabric error", oid)
					return
				}
			}
			count++
		}
		end := time.Now()
		var avgTime int64
		fmt.Println("cluster [", *ClusterID, "] ", "testReadLocalObj time:", end.Sub(start))
		if count != 0 {
			avgTime = end.Sub(start).Nanoseconds() / count
		}
		// Report the average per-object time in milliseconds.
		avgTimejson, _ := json.Marshal(float64(avgTime) / float64(1000000))
		_, _ = w.Write(avgTimejson)

	case "/testReadRemoteObj":
		withHash := r.FormValue("withHash")

		objIDsInBlock, err := s.GetBlockFromFabric()
		if err != nil {
			fmt.Println("cluster [", *ClusterID, "] ", "get block from fabric error", err)
			return
		}
		var block Block
		if !block.ParseWithParityBlock(objIDsInBlock) { // parse the raw block into block
			fmt.Printf("cluster [%d] -> block [%d] is empty, do nothing\n", *ClusterID, block.BlockId)
			return
		}
		count := int64(0)
		start := time.Now()
		// Spot-check every object in the block that lives on another cluster.
		for _, obj := range block.Objs {
			if int(obj.From) == *ClusterID {
				continue // skip objects stored locally
			}
			oid := string(obj.Oid)
			// Buffered for the same leak-avoidance reason as above.
			hashFromFabricChan := make(chan []byte, 1)
			if withHash == "true" {
				go s.fetchHashFromFabric(int(obj.From), oid, hashFromFabricChan)
			}
			data, err := s.GetObjFromCluster(oid, int(obj.From))
			if err != nil {
				fmt.Println("cluster [", *ClusterID, "] ", "read obj from remote cluster error", oid, err)
				return
			}
			if withHash == "true" {
				var m MerkleTree
				m.InitConfig([]int{1, 32, 32})
				if err = m.GenerateTree(data); err != nil {
					fmt.Println("cluster [", *ClusterID, "] ", "generate mktree error", "oid", oid)
				}
				hash := m.Root()
				hashFromFabric := <-hashFromFabricChan
				if hashFromFabric == nil {
					fmt.Println("cluster [", *ClusterID, "] ", "read obj from fabric error", oid)
					return
				}
				if bytes.Equal(hash, hashFromFabric) {
					fmt.Println("cluster [", *ClusterID, "] ", "read obj", oid, "success")
				} else {
					fmt.Println("cluster [", *ClusterID, "] ", "read obj", oid, "failed")
				}
			}
			count++
			if count >= 10 {
				break
			}
		}

		end := time.Now()
		var avgTime int64
		fmt.Println("cluster [", *ClusterID, "] ", "testReadRemoteObj time:", end.Sub(start))
		if count != 0 {
			avgTime = end.Sub(start).Nanoseconds() / count
		}
		avgTimejson, _ := json.Marshal(float64(avgTime) / float64(1000000))
		_, _ = w.Write(avgTimejson)

	default:
		fmt.Println("cluster", *ClusterID, "非法路径:", r.URL.Path)
	}
}

// fetchHashFromFabric asynchronously fetches the on-chain hash of object
// objID (owned by cluster objFrom) from the fabric SDK and delivers it on
// out; nil is sent on any failure. The caller must pass a channel with
// capacity >= 1 so this goroutine can never block forever.
func (s *Server) fetchHashFromFabric(objFrom int, objID string, out chan<- []byte) {
	url := "http://" + GetFabricSDKIPPort() + "/get?ID=" + strconv.Itoa(objFrom) + "_" + objID
	fmt.Println("cluster", *ClusterID, ":", url)
	resp, err := s.cli.Get(url)
	if err != nil {
		// Previously resp.Body.Close() was deferred before this check,
		// panicking here because resp is nil when err != nil.
		out <- nil
		return
	}
	defer resp.Body.Close()
	ret, err := io.ReadAll(resp.Body)
	if err != nil {
		out <- nil
		return
	}
	out <- ret
}

// GetBlockFromFabric fetches the object-ID list of the block at height
// HeightStartCheck from the fabric SDK and decodes the JSON response.
//
// Fix: the previous version deferred resp.Body.Close() before checking the
// Get error — a transport failure left resp nil and panicked — and the Get
// error itself was masked by the subsequent io.ReadAll assignment.
func (s *Server) GetBlockFromFabric() (*ObjIDsInBlock, error) {
	url := "http://" + GetFabricSDKIPPort() + "/getBlock?height=" + strconv.Itoa(HeightStartCheck)
	resp, err := s.cli.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	ret, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	objIDsInBlock := &ObjIDsInBlock{}
	if err = json.Unmarshal(ret, objIDsInBlock); err != nil {
		return nil, err
	}
	return objIDsInBlock, nil
}

// GetObjFromCluster downloads object obj_id from the remote cluster
// identified by from over HTTP and returns its raw bytes. A response body
// whose length differs from ObjectSize is treated as an error.
func (s *Server) GetObjFromCluster(obj_id string, from int) ([]byte, error) {
	resp, err := s.cli.Get(PrefixDealObj(from) + "get?oid=" + obj_id)
	if err != nil {
		fmt.Printf("cluster [%d] -> get obj from cluster [%d] error: %s\n", *ClusterID, from, err)
		return nil, err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if len(body) != ObjectSize {
		return nil, fmt.Errorf("cluster [%d] -> obj read len is not equal to ObjectSize, expect: %d, get: %d", *ClusterID, ObjectSize, len(body))
	}
	return body, nil
}

// func tls_server(cluster int, res *chan Recover) {
// 	conn, err := rados.NewConn()
// 	if err != nil {
// 		fmt.Println("cluster [", *ClusterID, "] ", "error when invoke a new connection:", err)
// 		return
// 	}
// 	defer conn.Shutdown()

// 	err = conn.ReadConfigFile(CephConfigPath)
// 	if err != nil {
// 		fmt.Println("cluster [", *ClusterID, "] ", "error when read default config file:", err)
// 		return
// 	}

// 	err = conn.Connect()
// 	if err != nil {
// 		fmt.Println("cluster [", *ClusterID, "] ", "error when connect:", err)
// 		return
// 	}

// 	fmt.Println("cluster [", *ClusterID, "] ", "connect ceph cluster ok!")

// 	ioctx, err := conn.OpenIOContext(CephPoolName)
// 	if err != nil {
// 		fmt.Println("cluster [", *ClusterID, "] ", "error when open IO context:", err)
// 		return
// 	}

// 	pool := x509.NewCertPool()
// 	addTrustCAcert(pool, GetCAcert(cluster))

// 	s := Server{ioctx: ioctx, conn: conn, recover: res}

// 	server := &http.Server{
// 		Addr:    ":" + PortPrefixTLS + strconv.Itoa(cluster),
// 		Handler: &s,
// 		TLSConfig: &tls.Config{
// 			ClientCAs:  pool,
// 			ClientAuth: tls.RequireAndVerifyClientCert,
// 		},
// 	}

// 	serverCert := GetCert(*ClusterID)
// 	serverKey := GetPriKey(*ClusterID)

// 	err = server.ListenAndServeTLS(serverCert, serverKey) //添加服务端证书和密钥
// 	if err != nil {
// 		fmt.Println("cluster [", *ClusterID, "] ", "ListenAndServeTLS err:", err)
// 	}

// 	// fmt.Println(pool.Subjects())
// }

// OssServer connects to the local ceph cluster and serves object-storage
// HTTP requests (see Server.ServeHTTP) on this cluster's port, blocking
// until the listener fails or a setup step errors out.
//
// clusterNum is currently unused; it was only needed by the disabled
// mutual-TLS variant (tls_server) that loaded one CA cert per cluster.
// res is the channel on which objects needing recovery are enqueued.
func OssServer(clusterNum int, res *chan Recover) {
	conn, err := rados.NewConn()
	if err != nil {
		fmt.Println("cluster [", *ClusterID, "] ", "error when invoke a new connection to ceph:", err)
		return
	}
	defer conn.Shutdown()

	err = conn.ReadConfigFile(CephConfigPath)
	if err != nil {
		fmt.Println("cluster [", *ClusterID, "] ", "error when read ceph config file:", err)
		return
	}

	err = conn.Connect()
	if err != nil {
		fmt.Println("cluster [", *ClusterID, "] ", "error when connect to ceph:", err)
		return
	}

	// Fix: the original Printf was missing a trailing newline, gluing this
	// line onto the next log message.
	fmt.Printf("cluster [%d] connect ceph cluster ok!\n", *ClusterID)

	ioctx, err := conn.OpenIOContext(CephPoolName)
	if err != nil {
		fmt.Println("cluster [", *ClusterID, "] ", "error when open IO context:", err)
		return
	}

	s := Server{ioctx: ioctx, conn: conn, cli: &http.Client{}, recover: res}

	server := &http.Server{
		Addr:    ":" + GetCephClusterPort(*ClusterID),
		Handler: &s,
		// TLSConfig: &tls.Config{
		// 	ClientCAs:  pool,
		// 	ClientAuth: tls.RequireAndVerifyClientCert,
		// },
	}

	// Plain HTTP; the mutual-TLS variant lives in the commented-out
	// tls_server above.
	err = server.ListenAndServe()
	if err != nil {
		fmt.Println("cluster [", *ClusterID, "] ", "ListenAndServe err:", err)
	}
}
