package encdb

import (
	"encoding/base64"
	"fhe-search-cloud/g"
	"fhe-search-cloud/gendbtools/plaindb"
	"fhe-search-cloud/models"
	"fhe-search-cloud/utils"
	"fhe-search-cloud/utils/log"
	"fhe-search-cloud/utils/prober"
	"fmt"
	"os"

	"gitee.com/fhe-search/fhe-search-rpc/encsdk"

	"github.com/jmoiron/sqlx"
)

func GenEncDBMain() {
	p := prober.NewProber(g.GetConfig().MysqlConfig.DbName + "-GenEncDBMain")
	p.AddProbe("DataSetLoadToDB")
	plaindb.DataSetLoadToDB()
	// p.AddProbe("wiki2019zh-shrink-1k")
	// plaindb.ShrinkPlainDocs(1_000)
	// p.AddProbe("wiki2019zh shrink-10k")
	// plaindb.ShrinkPlainDocs(10_000)
	// p.AddProbe("wiki2019zh-shrink-100k")
	// plaindb.ShrinkPlainDocs(100_000)
	// p.AddProbe("wiki2019zh-shrink-1m")
	// plaindb.ShrinkPlainDocs(1_000_000)
	db := plaindb.GetDbConn(
		g.GetConfig().MysqlConfig.User,
		g.GetConfig().MysqlConfig.Password,
		g.GetConfig().MysqlConfig.Host,
		g.GetConfig().MysqlConfig.Port,
		g.GetConfig().MysqlConfig.DbName,
	)
	// 分词，生成明文数据库
	p.AddProbe(g.GetConfig().MysqlConfig.DbName + "-GenerateBigPlainDB")
	plaindb.GenerateBigPlainDB(db)
	// 加密，生成密文数据库
	p.AddProbe(g.GetConfig().MysqlConfig.DbName + "-GenerateEncDB")
	GenerateEncDB(db)
	// 删除明文数据库和对应的文件
	p.AddProbe(g.GetConfig().MysqlConfig.DbName + "-DeletePlainDB")
	plaindb.DeletePlainDB(db)
	p.EndProbe()
	log.Log(p.Report())
}

func GenEncDBNamed(dbName string) {
	db := plaindb.GetDbConn(
		g.GetConfig().MysqlConfig.User,
		g.GetConfig().MysqlConfig.Password,
		g.GetConfig().MysqlConfig.Host,
		g.GetConfig().MysqlConfig.Port,
		dbName,
	)
	GenEncDBCore(db)
}

// GenEncDBCore tokenizes the documents into the plaintext tables of db
// and then encrypts them into the ciphertext tables.
func GenEncDBCore(db *sqlx.DB) {
	// Tokenize into the plaintext database.
	plaindb.GenerateBigPlainDB(db)
	// Encrypt into the ciphertext database.
	GenerateEncDB(db)
	// Dropping the plaintext tables is intentionally left disabled here.
	// plaindb.DropPlainDBTables(db)
}

// GenerateEncDB builds the fully encrypted database from the plaintext
// tables: the documents table uses SM1 encryption, the term tables use
// FHE encryption, and the freq field of the index uses SM1 encryption.
func GenerateEncDB(db *sqlx.DB) {
	initEncDBTables(db) // recreate term/index1/enc_documents tables
	encDocuments(db)    // SM1-encrypt the documents
	encTermTables(db)   // FHE-encrypt the term tables
	encIndex1(db)       // SM1-encrypt the index frequencies
}

// initEncDBTables drops and recreates all ciphertext tables:
// term1..term<g.MaxTermLen>, index1, and enc_documents.
func initEncDBTables(db *sqlx.DB) {
	// Drop any existing ciphertext tables.
	for n := 1; n <= g.MaxTermLen; n++ {
		db.MustExec(fmt.Sprintf("drop table if exists term%d ;", n))
	}
	db.MustExec("drop table if exists index1;")
	db.MustExec("drop table if exists enc_documents;")
	// Recreate them from scratch.
	for n := 1; n <= g.MaxTermLen; n++ {
		db.MustExec(fmt.Sprintf(`CREATE TABLE IF NOT EXISTS term%d (
			term_id BIGINT PRIMARY KEY AUTO_INCREMENT,
			bucket8 INT,
			fhe_term BLOB,
			KEY (bucket8));`, n))
	}
	db.MustExec(`CREATE TABLE IF NOT EXISTS index1 (
	     term_id BIGINT,
	     term_len INT,
	     doc_id BIGINT,
	     enc_freq TinyBlob,
	     PRIMARY KEY (term_id, term_len, doc_id));`)
	db.MustExec(`CREATE TABLE IF NOT EXISTS enc_documents (
	     id BIGINT PRIMARY KEY AUTO_INCREMENT,
	     enc_title BLOB,
		 enc_file_type BLOB,
	     enc_file_path VARCHAR(255));`)
	log.Log("drop and create table term/index1/enc_documents finished.")
}

// // GeneratePlainDB
// func GeneratePlainDB() {
// 	var err error
// 	// get docs
// 	// var docs []models.Document
// 	// err = GetEncDBConn().Select(&docs, fmt.Sprintf("select * from %s;", PlainDocsTableName))
// 	// if err != nil {
// 	// 	panic(err)
// 	// }
// 	var docs []models.PlainDocument
// 	err = GetEncDBConn().Select(&docs, fmt.Sprintf("select * from documents;"))
// 	if err != nil {
// 		panic(err)
// 	}
// 	for i := 0; i < len(docs); i++ {
// 		filePath := docs[i].FilePath
// 		var contentBytes []byte
// 		contentBytes, err = os.ReadFile(filePath)
// 		if err != nil {
// 			panic(err)
// 		}
// 		docs[i].Content = string(contentBytes)
// 	}

// 	fmt.Println("read plain docs finished.")
// 	// cut
// 	// dict用法：termDocItems := dict[term][termLen]
// 	dict := make([]map[string][]models.TermDocItem, g.MaxTermLen+1)
// 	for i := 0; i <= g.MaxTermLen; i++ {
// 		dict[i] = make(map[string][]models.TermDocItem)
// 	}
// 	cutter := g.GetCutter()
// 	for i := 0; i < len(docs); i++ {
// 		keywords := cutter.CutForSearch(docs[i].Content, true)
// 		frequency := utils.RemoveDuplicatesAndCount(keywords)
// 		for term, freq := range frequency {
// 			termLen := utils.CharNum(term)
// 			if termLen > g.MaxTermLen {
// 				continue
// 			}
// 			tdt := models.TermDocItem{
// 				DocId: docs[i].Id,
// 				Freq:  freq,
// 			}
// 			dict[termLen][term] = append(dict[termLen][term], tdt)
// 		}
// 		fmt.Printf("docs %d cut finished.\n", i)
// 	}
// 	// write to db
// 	termIdMap := make(map[string]int)
// 	var sql string
// 	const PlainTermsColumnNum = 2
// 	for i := 1; i <= g.MaxTermLen; i++ {
// 		var PlainTermData []interface{}
// 		id := 1
// 		for term := range dict[i] {
// 			termIdMap[term] = id
// 			PlainTermData = append(PlainTermData, models.PlainTermItem{TermId: id, Term: term})
// 			id++
// 		}
// 		// write to term_i
// 		sql = fmt.Sprintf("insert into plain_term%d (term_id, term) values (:term_id, :term)", i)
// 		batchWriteToDB(sql, PlainTermsColumnNum, PlainTermData)
// 		fmt.Printf("plain_term%d generate finished.\n", i)
// 	}
// 	var PlainIndexData []interface{}
// 	for termLen := 1; termLen <= g.MaxTermLen; termLen++ {
// 		for term, termDocItems := range dict[termLen] {
// 			for _, termDocItem := range termDocItems {
// 				PlainIndexData = append(PlainIndexData, models.PlainIndexItem{
// 					TermId:  termIdMap[term],
// 					TermLen: termLen,
// 					DocId:   termDocItem.DocId,
// 					Freq:    termDocItem.Freq,
// 				})
// 			}
// 		}
// 	}
// 	sql = "insert into plain_index (term_id, term_len, doc_id, freq) values (:term_id, :term_len, :doc_id, :freq)"
// 	const PlainIndexColumnNum = 4 // plain_index表的列数
// 	batchWriteToDB(sql, PlainIndexColumnNum, PlainIndexData)
// 	fmt.Println("plain_index generate finished.")
// }

// func batchWriteToDB(sql string, columnNum int, data []interface{}) {
// 	plaindb.BatchWriteToDB(GetEncDBConn(), sql, columnNum, data)
// }

// encDocuments SM1-encrypts every plaintext document into the
// enc_documents table plus one ciphertext file per document.
//
// For each row of the documents table it reads the file content from
// disk, base64-encodes it, SM1-encrypts the encoded content and writes
// it to EncData/EncDoc<id>.txt under the configured base path; the
// title and file type are SM1-encrypted into table columns.
// Panics on any error, like the rest of this generation pipeline.
func encDocuments(db *sqlx.DB) {
	const EncFileDir = "EncData/"
	// Loop-invariant: hoisted out of the per-document loop.
	const insertSQL = `INSERT INTO enc_documents (id, enc_title,enc_file_type, enc_file_path) VALUES (:id, :enc_title, :enc_file_type, :enc_file_path)`
	fileBasePath := g.GetConfig().FheSearchConfig.FileBasePath
	utils.ClearDir(fileBasePath + EncFileDir)
	utils.ClearDir(fileBasePath + "EncDataFromAdd")
	// Fetch all plaintext document rows.
	var docs []models.PlainDocument
	err := db.Select(&docs, "select * from documents;")
	if err != nil {
		panic(err)
	}
	for i := 0; i < len(docs); i++ {
		// Read the plaintext file content.
		contentPath := fileBasePath + docs[i].FilePath
		var content []byte
		content, err = os.ReadFile(contentPath)
		if err != nil {
			panic(err)
		}
		contentBase64 := base64.StdEncoding.EncodeToString(content)
		// SM1-encrypt the document fields for the ciphertext table.
		var sm1Title, sm1Content, sm1FileType []byte
		sm1Title, err = encsdk.SymEncUtf8(docs[i].Title)
		if err != nil {
			panic(err)
		}
		sm1Content, err = encsdk.SymEncUtf8(contentBase64)
		if err != nil {
			panic(err)
		}
		sm1FileType, err = encsdk.SymEncUtf8(docs[i].FileType)
		if err != nil {
			panic(err)
		}
		encDoc := models.EncDocument{
			Id:          docs[i].Id,
			EncTitle:    sm1Title,
			EncFileType: sm1FileType,
			EncFilePath: fmt.Sprintf("%sEncDoc%d.txt", EncFileDir, docs[i].Id),
		}
		// Write the ciphertext file.
		err = os.WriteFile(fileBasePath+encDoc.EncFilePath, sm1Content, 0644)
		if err != nil {
			panic(err)
		}
		// Insert into enc_documents.
		// BUG FIX: the NamedExec error was previously ignored, so failed
		// inserts went unnoticed; panic like every other DB call here.
		if _, err = db.NamedExec(insertSQL, encDoc); err != nil {
			panic(err)
		}
		if i%1000 == 0 {
			log.Logf("enc doc index %d insert finished. doc len is %d.", i, len(content))
		}
	}
}

// encTermTables encrypts plain_term1..plain_term<g.MaxTermLen> into the
// corresponding term1..term<g.MaxTermLen> ciphertext tables.
func encTermTables(db *sqlx.DB) {
	for length := 1; length <= g.MaxTermLen; length++ {
		encSingleTermTable(db, length)
	}
}

// encSingleTermTable FHE-encrypts all rows of plain_term<termLen> into
// term<termLen>, reading and writing in batches sized to stay within
// MySQL's placeholder limit. Panics on any error.
func encSingleTermTable(db *sqlx.DB, termLen int) {
	const TermsColumnNum = 3
	const BatchSize = g.MysqlMaxAllUnitNum / TermsColumnNum
	var sql string
	tableSize := 0
	sql = fmt.Sprintf("select count(1) from plain_term%d;", termLen)
	if err := db.Get(&tableSize, sql); err != nil {
		panic(err)
	}
	if tableSize == 0 {
		return
	}
	// BUG FIX: the previous loop (i <= tableSize/BatchSize) ran one extra
	// iteration when tableSize was an exact multiple of BatchSize, passing
	// an empty slice to NamedExec. Iterate by offset and stop on an empty
	// batch instead.
	for offset := 0; offset < tableSize; offset += BatchSize {
		// Batch read.
		var plainTermItems []models.PlainTermItem
		sql = fmt.Sprintf("select * from plain_term%d limit %d offset %d;", termLen, BatchSize, offset)
		if err := db.Select(&plainTermItems, sql); err != nil {
			panic(err)
		}
		if len(plainTermItems) == 0 {
			break // table shrank or count was an exact multiple of BatchSize
		}
		// FHE-encrypt each term.
		termItems := make([]models.TermItem, 0, len(plainTermItems))
		for _, plainTermItem := range plainTermItems {
			fheTerm, err := encsdk.FheEncUtf8(plainTermItem.Term)
			if err != nil {
				panic(err)
			}
			termItems = append(termItems, models.TermItem{
				TermId:  plainTermItem.TermId,
				Bucket8: encsdk.GetBucket8(plainTermItem.Term),
				FheTerm: fheTerm,
			})
		}
		// Batch insert.
		sql = fmt.Sprintf("insert into term%d(term_id, bucket8, fhe_term) values(:term_id, :bucket8, :fhe_term)", termLen)
		if _, err := db.NamedExec(sql, termItems); err != nil {
			panic(err)
		}
	}
	log.Logf("term%d encrypted. data rows num: %d", termLen, tableSize)
}

// encIndex1 copies plain_index into index1, SM1-encrypting the freq
// column in batches sized to stay within MySQL's placeholder limit.
// Panics on any error.
func encIndex1(db *sqlx.DB) {
	const Index1ColumnNum = 4
	const BatchSize = g.MysqlMaxAllUnitNum / Index1ColumnNum
	tableSize := 0
	if err := db.Get(&tableSize, "select count(1) from plain_index;"); err != nil {
		panic(err)
	}
	if tableSize == 0 {
		return
	}
	// BUG FIX: batchNum was previously floor(tableSize/BatchSize) with an
	// inclusive loop, which ran one extra empty iteration (and passed an
	// empty slice to NamedExec) whenever tableSize was an exact multiple
	// of BatchSize. Use a ceiling count and guard against empty batches.
	batchNum := (tableSize + BatchSize - 1) / BatchSize
	for i := 0; i < batchNum; i++ {
		// Batch read.
		var plainIndexItems []models.PlainIndexItem
		sql := fmt.Sprintf("select * from plain_index limit %d offset %d;", BatchSize, i*BatchSize)
		if err := db.Select(&plainIndexItems, sql); err != nil {
			panic(err)
		}
		if len(plainIndexItems) == 0 {
			break // table shrank underneath us; nothing left to encrypt
		}
		// Collect frequencies for batch encryption.
		freqArr := make([]int32, 0, len(plainIndexItems))
		for _, plainIndexItem := range plainIndexItems {
			freqArr = append(freqArr, int32(plainIndexItem.Freq))
		}
		// Batch-encrypt with SymEncS32Ex.
		encFreqArr, err := encsdk.SymEncS32Ex(freqArr)
		if err != nil {
			panic(err)
		}
		indexItems := make([]models.IndexItem, 0, len(plainIndexItems))
		for idx, plainIndexItem := range plainIndexItems {
			indexItems = append(indexItems, models.IndexItem{
				TermId:  plainIndexItem.TermId,
				TermLen: plainIndexItem.TermLen,
				DocId:   plainIndexItem.DocId,
				EncFreq: encFreqArr[idx],
			})
		}
		// Batch insert into index1.
		insertSQL := "insert into index1(term_id,term_len, doc_id, enc_freq) values(:term_id,:term_len, :doc_id, :enc_freq);"
		if _, err = db.NamedExec(insertSQL, indexItems); err != nil {
			panic(err)
		}
		log.Logf("index1 encrypt rate of process: %d/%d.", i, batchNum)
	}
	log.Logf("index1 encrypted. data rows num: %d", tableSize)
}
