package plaindb

import (
	"fhe-search-cloud/g"
	"fhe-search-cloud/models"
	"fhe-search-cloud/utils"
	"fhe-search-cloud/utils/log"
	"fhe-search-cloud/utils/shardedmap"
	"fmt"
	"os"
	"sync"
	"sync/atomic"

	"github.com/jmoiron/sqlx"
)

// ShrinkPlainDocs shrinks the wiki2019zh database down to shrinkDocNum
// documents by copying them into a new database named
// wiki2019zh_<sizeLabel> (e.g. wiki2019zh_1k).
// Supported sizes are 1_000/10_000/100_000/1_000_000 (1k/10k/100k/1m);
// any other value makes GetNumStr panic.
func ShrinkPlainDocs(shrinkDocNum int) {
	shrinkDocNumStr := GetNumStr(shrinkDocNum)
	srcDsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/wiki2019zh",
		g.GetConfig().MysqlConfig.User,
		g.GetConfig().MysqlConfig.Password,
		g.GetConfig().MysqlConfig.Host,
		g.GetConfig().MysqlConfig.Port,
	)
	srcDb, err := sqlx.Connect("mysql", srcDsn)
	if err != nil {
		panic(err)
	}
	defer srcDb.Close()
	// Create the destination database and connect to it.
	destDbName := "wiki2019zh" + "_" + shrinkDocNumStr
	srcDb.MustExec(fmt.Sprintf(`CREATE DATABASE IF NOT EXISTS %s;`, destDbName))
	destDsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s",
		g.GetConfig().MysqlConfig.User,
		g.GetConfig().MysqlConfig.Password,
		g.GetConfig().MysqlConfig.Host,
		g.GetConfig().MysqlConfig.Port,
		destDbName,
	)
	destDb, err := sqlx.Connect("mysql", destDsn)
	if err != nil {
		panic(err)
	}
	defer destDb.Close()
	// Rebuild the destination documents table from scratch.
	destDb.MustExec(`DROP TABLE IF EXISTS documents;`)
	destDb.MustExec(`CREATE TABLE IF NOT EXISTS documents (
		id BIGINT PRIMARY KEY AUTO_INCREMENT,
		title VARCHAR(255) NOT NULL,
		file_type VARCHAR(255) NOT NULL,
		file_path VARCHAR(255) NOT NULL);`)
	const DocBatchSize = 1000

	lastId := 0
	for i := 0; i < shrinkDocNum/DocBatchSize; i++ {
		var docs []*models.PlainDocument
		// Read one batch from the source, paging by id so batches are
		// disjoint and ordered.
		query := fmt.Sprintf("select * from documents where id>%d order by id limit %d;", lastId, DocBatchSize)
		// BUG FIX: the Select error was previously ignored; combined with an
		// empty result it caused an index-out-of-range panic below.
		if err = srcDb.Select(&docs, query); err != nil {
			panic(err)
		}
		if len(docs) == 0 {
			// Source exhausted before reaching shrinkDocNum documents.
			break
		}
		// Copy the batch into the destination, preserving the source ids.
		if _, err = destDb.NamedExec(
			"insert into documents (id, title, file_type, file_path) values (:id, :title, :file_type, :file_path)",
			docs,
		); err != nil {
			panic(err)
		}
		lastId = int(docs[len(docs)-1].Id)
	}
	log.Log(destDbName, "shrink finished.")
}

// ShrinkPlainDocs2CardEnc shrinks the wiki2019zh database down to
// shrinkDocNum documents by copying them into a new database named
// wiki2019zh_<sizeLabel>_cardenc (e.g. wiki2019zh_1k_cardenc).
// Supported sizes are 1_000/10_000/100_000/1_000_000 (1k/10k/100k/1m);
// any other value makes GetNumStr panic.
func ShrinkPlainDocs2CardEnc(shrinkDocNum int) {
	shrinkDocNumStr := GetNumStr(shrinkDocNum)
	srcDsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/wiki2019zh",
		g.GetConfig().MysqlConfig.User,
		g.GetConfig().MysqlConfig.Password,
		g.GetConfig().MysqlConfig.Host,
		g.GetConfig().MysqlConfig.Port,
	)
	srcDb, err := sqlx.Connect("mysql", srcDsn)
	if err != nil {
		panic(err)
	}
	defer srcDb.Close()
	// Create the destination database and connect to it.
	destDbName := "wiki2019zh" + "_" + shrinkDocNumStr + "_cardenc"
	srcDb.MustExec(fmt.Sprintf(`CREATE DATABASE IF NOT EXISTS %s;`, destDbName))
	destDsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s",
		g.GetConfig().MysqlConfig.User,
		g.GetConfig().MysqlConfig.Password,
		g.GetConfig().MysqlConfig.Host,
		g.GetConfig().MysqlConfig.Port,
		destDbName,
	)
	destDb, err := sqlx.Connect("mysql", destDsn)
	if err != nil {
		panic(err)
	}
	defer destDb.Close()
	// Rebuild the destination documents table from scratch.
	destDb.MustExec(`DROP TABLE IF EXISTS documents;`)
	destDb.MustExec(`CREATE TABLE IF NOT EXISTS documents (
		id BIGINT PRIMARY KEY AUTO_INCREMENT,
		title VARCHAR(255) NOT NULL,
		file_type VARCHAR(255) NOT NULL,
		file_path VARCHAR(255) NOT NULL);`)
	const DocBatchSize = 1000

	lastId := 0
	for i := 0; i < shrinkDocNum/DocBatchSize; i++ {
		var docs []*models.PlainDocument
		// Read one batch from the source, paging by id so batches are
		// disjoint and ordered.
		query := fmt.Sprintf("select * from documents where id>%d order by id limit %d;", lastId, DocBatchSize)
		// BUG FIX: the Select error was previously ignored; combined with an
		// empty result it caused an index-out-of-range panic below.
		if err = srcDb.Select(&docs, query); err != nil {
			panic(err)
		}
		if len(docs) == 0 {
			// Source exhausted before reaching shrinkDocNum documents.
			break
		}
		// Copy the batch into the destination, preserving the source ids.
		if _, err = destDb.NamedExec(
			"insert into documents (id, title, file_type, file_path) values (:id, :title, :file_type, :file_path)",
			docs,
		); err != nil {
			panic(err)
		}
		lastId = int(docs[len(docs)-1].Id)
	}
	log.Log(destDbName, "shrink finished.")
}

// GetNumStr maps a supported document count to its short size label:
// 1_000→"1k", 10_000→"10k", 100_000→"100k", 1_000_000→"1m".
// It panics for any other value.
func GetNumStr(num int) string {
	switch num {
	case 1_000:
		return "1k"
	case 10_000:
		return "10k"
	case 100_000:
		return "100k"
	case 1_000_000:
		return "1m"
	}
	panic("shrinkDocNum not support")
}

// GetDbConn opens a MySQL connection to the given database using the
// supplied credentials, panicking if the connection cannot be established.
func GetDbConn(user string, passwd string, host string, port int, dbName string) *sqlx.DB {
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", user, passwd, host, port, dbName)
	conn, connErr := sqlx.Connect("mysql", dsn)
	if connErr != nil {
		panic(connErr)
	}
	return conn
}

// DeletePlainDB deletes the plaintext database: it drops the
// plain_term*/plain_index/documents tables and then removes the
// plaintext files from disk.
func DeletePlainDB(db *sqlx.DB) {
	DropPlainDBTables(db)
	DeletePlainFiles()
}

// DeletePlainFiles removes the AllPlainData directory (and everything in
// it) under the configured file base path, panicking on failure.
func DeletePlainFiles() {
	target := g.GetConfig().FheSearchConfig.FileBasePath + "AllPlainData/"
	if rmErr := os.RemoveAll(target); rmErr != nil {
		panic(rmErr)
	}
}

// DropPlainDBTables drops all plaintext tables if they exist: one
// plain_term<N> table per term length up to g.MaxTermLen, plus the
// plain_index and documents tables.
func DropPlainDBTables(db *sqlx.DB) {
	stmts := make([]string, 0, g.MaxTermLen+2)
	for termLen := 1; termLen <= g.MaxTermLen; termLen++ {
		stmts = append(stmts, fmt.Sprintf("drop table if exists plain_term%d ;", termLen))
	}
	stmts = append(stmts, "drop table if exists plain_index;", "drop table if exists documents;")
	for _, stmt := range stmts {
		db.MustExec(stmt)
	}
}

// InitPlainDBTables drops and recreates the plain_term1..plain_term<MaxTermLen>
// and plain_index tables, leaving them all empty.
func InitPlainDBTables(db *sqlx.DB) {
	// Drop any existing term/index tables first.
	for termLen := 1; termLen <= g.MaxTermLen; termLen++ {
		db.MustExec(fmt.Sprintf("drop table if exists plain_term%d ;", termLen))
	}
	db.MustExec("drop table if exists plain_index;")
	// Recreate one term table per term length. Deliberately no secondary
	// KEY (term): indexing the term column slows down bulk inserts.
	for termLen := 1; termLen <= g.MaxTermLen; termLen++ {
		db.MustExec(fmt.Sprintf(`CREATE TABLE IF NOT EXISTS plain_term%d (
			term_id BIGINT PRIMARY KEY AUTO_INCREMENT,
			term VARCHAR(255));`, termLen))
	}
	// The composite primary key makes (term, doc) postings unique.
	db.MustExec(`CREATE TABLE IF NOT EXISTS plain_index (
		term_id BIGINT,
		term_len INT,
		doc_id BIGINT,
		freq INT,
		PRIMARY KEY (term_id, term_len, doc_id));`)
	log.Log("drop and create table plain_term/plain_index finished.")
}

// var (
// 	pkgVarPlainIndexChan chan *models.PlainIndexItem
// 	pkgVarPlainTermChan  chan *models.PlainTermItem

// 	// pkgVarPlainDB *sqlx.DB
// )

// AutoInc is a concurrency-safe incrementing counter used to hand out
// sequential ids. The zero value is ready to use; the first GetInc
// returns 1.
type AutoInc struct {
	mu sync.Mutex
	v  int
}

// GetInc increments the counter under the lock and returns the new value.
func (ai *AutoInc) GetInc() int {
	ai.mu.Lock()
	ai.v++
	next := ai.v
	ai.mu.Unlock()
	return next
}

// GenerateBigPlainDB is a high-throughput tool that builds the plaintext
// database: it tokenizes every document in the documents table and fills
// the plain_term*/plain_index tables, using a shared term-id cache plus
// parallel word cutting.
//
// It must run to completion without interruption: it drops and recreates
// the target tables, so stopping mid-run leaves the plaintext DB incomplete.
func GenerateBigPlainDB(db *sqlx.DB) {
	log.Log("GenerateBigPlainDB start.")
	// Buffered channels decouple the producers (tokenizers) from the
	// consumers (batched DB writers) for plain_index and plain_term rows.
	var err error
	plainIndexChan := make(chan *models.PlainIndexItem, 20000)
	plainTermChan := make(chan *models.PlainTermItem, 20000)

	// pkgVarPlainDB = db
	InitPlainDBTables(db)

	const DocBatchSize = 1000
	// const DocBatchSize = 100

	// Start the consumers first, then the producers. Once all producers
	// finish, the channels are closed and we wait for the consumers to
	// drain and flush what remains.
	wgProducer := &sync.WaitGroup{}
	wgConsumer := &sync.WaitGroup{}
	// const consumerNum = 3
	const consumerNum = 5
	for i := 0; i < consumerNum; i++ {
		wgConsumer.Add(1)
		go func() {
			defer wgConsumer.Done()
			writeToTablePlainIndex(db, plainIndexChan)
		}()
		wgConsumer.Add(1)
		go func() {
			defer wgConsumer.Done()
			writeToTablePlainTerm(db, plainTermChan)
		}()
	}
	// producerChan is a counting semaphore bounding concurrent producers.
	const MaxProducerNum = 10
	producerChan := make(chan struct{}, MaxProducerNum)
	const shardN = 10
	termIdCache := shardedmap.NewShardedMap(shardN) // caches term -> term_id
	lastId := 0
	docProcessingCnt := 0
	docProcessedCnt := &atomic.Int32{}
	autoIncTermId := &AutoInc{}
	for {
		var docs []*models.PlainDocument
		// var docs []*models.Document
		// Page through documents by id so batches are disjoint and ordered.
		sql := fmt.Sprintf("select * from documents where id>%d order by id limit %d;", lastId, DocBatchSize)
		err = db.Select(&docs, sql)
		if err != nil {
			panic(err)
		}
		if len(docs) == 0 {
			break
		}
		lastId = docs[len(docs)-1].Id
		wgProducer.Add(1)
		producerChan <- struct{}{} // acquire a producer slot (concurrency limit)
		go func() {
			defer wgProducer.Done()
			// generateSmallPlainDB(db, docs)
			// docs is declared inside the loop, so each goroutine captures
			// its own batch.
			generateSmallPlainDB(plainIndexChan, plainTermChan, termIdCache, autoIncTermId, docs)
			docProcessedCnt.Add(int32(len(docs)))
			log.Logf("%d docs have been cut. last doc_id: %d. ShardedMap len: %d.",
				docProcessedCnt.Load(),
				docs[len(docs)-1].Id,
				termIdCache.ItemLen())
			<-producerChan // release the producer slot
		}()
		docProcessingCnt += len(docs)
		log.Log("docs are processing: ", docProcessingCnt)
	}
	// All batches dispatched: wait for producers, close the channels so the
	// consumer loops terminate, then wait for the consumers' final flush.
	wgProducer.Wait()
	close(plainIndexChan)
	close(plainTermChan)
	wgConsumer.Wait()
	log.Logf("GenerateBigPlainDB generate finished. %d docs processed. ShardedMap len: %d.", docProcessedCnt.Load(), termIdCache.ItemLen())
}

// writeToTablePlainIndex drains plainIndexChan and bulk-inserts the items
// into the plain_index table in batches of 5_000 rows. It returns once the
// channel is closed and all remaining items have been flushed.
func writeToTablePlainIndex(db *sqlx.DB, plainIndexChan chan *models.PlainIndexItem) {
	sql := "insert into plain_index (term_id, term_len, doc_id, freq) values (:term_id, :term_len, :doc_id, :freq)"
	const PlainIndexColumnNum = 4 // columns per row; used for batch sizing
	plainIndexItems := []interface{}{}
	// Idiomatic channel drain: range receives until the channel is closed,
	// replacing the original manual nil-sentinel check (producers never
	// send nil; a nil arrived only as the closed channel's zero value).
	for plainIndexItem := range plainIndexChan {
		plainIndexItems = append(plainIndexItems, plainIndexItem)
		if len(plainIndexItems) >= 5_000 {
			BatchWriteToDB(db, sql, PlainIndexColumnNum, plainIndexItems)
			plainIndexItems = []interface{}{}
		}
	}
	// Channel closed: flush whatever is left.
	if len(plainIndexItems) > 0 {
		BatchWriteToDB(db, sql, PlainIndexColumnNum, plainIndexItems)
	}
}

// writeToTablePlainTerm drains plainTermChan and bulk-inserts the terms
// into the per-length plain_term<N> tables, flushing a table's pending
// rows once 5_000 have accumulated. Remaining rows are flushed after the
// channel is closed.
func writeToTablePlainTerm(db *sqlx.DB, plainTermChan chan *models.PlainTermItem) {
	sql := "insert into plain_term%d (term_id, term) values (:term_id, :term)"
	const PlainTermsColumnNum = 2
	// One pending batch per term length (index 0 is unused; the producers
	// only emit terms with 1 < length <= g.MaxTermLen).
	plainTermItems := [g.MaxTermLen + 1][]interface{}{}
	// Range receives until the channel is closed (producers never send nil,
	// so this matches the original nil-sentinel loop).
	for plainTermItem := range plainTermChan {
		termLen := utils.CharNum(plainTermItem.Term)
		plainTermItems[termLen] = append(plainTermItems[termLen], plainTermItem)
		// Only the bucket that just grew can have reached the threshold, so
		// check it alone instead of scanning every term length per item.
		if len(plainTermItems[termLen]) >= 5_000 {
			BatchWriteToDB(db, fmt.Sprintf(sql, termLen), PlainTermsColumnNum, plainTermItems[termLen])
			plainTermItems[termLen] = []interface{}{}
		}
	}
	// Channel closed: flush every non-empty pending batch.
	for i := 1; i <= g.MaxTermLen; i++ {
		if len(plainTermItems[i]) > 0 {
			BatchWriteToDB(db, fmt.Sprintf(sql, i), PlainTermsColumnNum, plainTermItems[i])
		}
	}
}

// func batchWriteToDB(sql string, columnNum int, data []interface{}) {
// 	if pkgVarPlainDB == nil {
// 		panic("pkgVarPlainDB is nil")
// 	}
// 	BatchWriteToDB(pkgVarPlainDB, sql, columnNum, data)
// }

// BatchWriteToDB inserts data via the given named-exec SQL statement,
// splitting it into batches small enough to stay under MySQL's bind-unit
// limit (g.MysqlMaxAllUnitNum total units, i.e. g.MysqlMaxAllUnitNum /
// columnNum rows per batch). It panics on any exec error.
func BatchWriteToDB(db *sqlx.DB, sql string, columnNum int, data []interface{}) {
	batchSize := g.MysqlMaxAllUnitNum / columnNum
	for start := 0; start < len(data); start += batchSize {
		end := start + batchSize
		if end > len(data) {
			end = len(data)
		}
		if _, execErr := db.NamedExec(sql, data[start:end]); execErr != nil {
			panic(execErr)
		}
	}
}

// generateSmallPlainDB tokenizes one batch of documents: for each document
// it reads the file from disk, cuts the content into search keywords, and
// emits one PlainIndexItem per (term, doc) pair on plainIndexChan. New
// terms get an id from getTermId2, which also emits them on plainTermChan.
func generateSmallPlainDB(plainIndexChan chan *models.PlainIndexItem, plainTermChan chan *models.PlainTermItem, termIdCache *shardedmap.ShardedMap, autoIncTermId *AutoInc, docs []*models.PlainDocument) {
	var err error
	cutter := g.GetCutter()
	// Sanity set: RemoveDuplicatesAndCount should already make every
	// (term, doc) pair unique; a duplicate here indicates an upstream bug,
	// so we panic loudly rather than corrupt plain_index's primary key.
	set := make(map[models.PlainIndexItem]bool)
	fileBasePath := g.GetConfig().FheSearchConfig.FileBasePath
	for i := 0; i < len(docs); i++ {
		filePath := fileBasePath + docs[i].FilePath
		var contentBytes []byte
		contentBytes, err = os.ReadFile(filePath)
		if err != nil {
			panic(err)
		}
		content := string(contentBytes) // files are assumed to be plain txt
		// content := string(docs[i].Content)
		// Cut into search keywords and count per-term frequencies.
		keywords := cutter.CutForSearch(content, true)
		frequency := utils.RemoveDuplicatesAndCount(keywords)
		for term, freq := range frequency {
			termLen := utils.CharNum(term)
			if termLen > g.MaxTermLen || termLen <= 1 || len(term) <= 2*termLen {
				// Skip single-character terms, over-long terms, and
				// non-Chinese terms: CJK characters are 3 bytes in UTF-8,
				// so a Chinese term satisfies len(term) > 2*termLen.
				continue
			}
			pit := &models.PlainIndexItem{
				// TermId:  getTermId(db, term),
				TermId:  getTermId2(plainTermChan, termIdCache, autoIncTermId, term),
				TermLen: termLen,
				DocId:   docs[i].Id,
				Freq:    freq,
			}
			_, ok := set[*pit]
			if !ok {
				set[*pit] = true
			} else {
				log.Logf("%#v %#v. duplicate term in one doc.", *pit, term)
				log.Log(frequency)
				panic("duplicate term in one doc")
			}
			plainIndexChan <- pit
		}
	}
}

// var (
// 	autoTermId   = 1
// 	autoTermIdMu = &sync.Mutex{}
// )

// // TODO：多缓存实例，减少锁的竞争
// // const CacheCap = 20_000_000
// const CacheCap = 1_000_000

// var cacheTermId = lru.NewLRUCache(CacheCap)
// var cacheTermIdMutex = &sync.RWMutex{}

// func getTermId(db *sqlx.DB, term string) int {
// 	cacheTermIdMutex.Lock()
// 	defer cacheTermIdMutex.Unlock()
// 	// TODO：增加布隆过滤器
// 	termIdInterface, ok := cacheTermId.Get(term)
// 	if ok {
// 		return termIdInterface.(int)
// 	}
// 	termId := getTermIdFromDB(db, term)
// 	if termId == -1 {
// 		// 太慢了，1000篇文档costTime: 11m7.00151531s
// 		// tx := GetEncDBConn().MustBegin()
// 		// sql := fmt.Sprintf("INSERT INTO plain_term%d (term) VALUES (?)", utils.CharNum(term))
// 		// tx.MustExec(sql, term)
// 		// tx.Get(&termId, "SELECT LAST_INSERT_ID();")
// 		// tx.Commit()
// 		// 1000篇文档，用下面的方法 costTime: 8.682372125s
// 		termId = autoTermId
// 		autoTermId++
// 		plainTermItem := models.PlainTermItem{
// 			TermId: termId,
// 			Term:   term,
// 		}
// 		pkgVarPlainTermChan <- &plainTermItem
// 	}
// 	cacheTermId.Put(term, termId)
// 	return termId
// }

// func getTermIdFromDB(db *sqlx.DB, term string) int {
// 	termId := -1
// 	sql := fmt.Sprintf("select term_id from plain_term%d where term='%s';", utils.CharNum(term), term)
// 	err := db.Get(&termId, sql)
// 	if err != nil {
// 		termId = -1
// 	}
// 	return termId
// }

// getTermId2 returns the term_id for term. On a cache hit it returns the
// cached id; on a miss it allocates the next id from autoIncTermId, sends
// the new (term_id, term) pair to plainTermChan so a writer goroutine
// persists it in plain_term<N>, and records the mapping in termIdCache.
//
// NOTE(review): the Get / Set pair on termIdCache is not atomic. Two
// goroutines that miss the cache for the same term concurrently can each
// allocate a distinct id and emit the term twice. Confirm whether
// shardedmap offers a get-or-set primitive, or whether duplicate term
// rows are tolerated downstream.
func getTermId2(plainTermChan chan *models.PlainTermItem, termIdCache *shardedmap.ShardedMap, autoIncTermId *AutoInc, term string) int {
	var termId int
	termIdInterface, ok := termIdCache.Get(term)
	if ok {
		termId = termIdInterface.(int)
		return termId
	}
	// Cache miss: allocate a fresh id and queue the term for insertion.
	termId = autoIncTermId.GetInc()
	plainTermItem := models.PlainTermItem{
		TermId: termId,
		Term:   term,
	}
	plainTermChan <- &plainTermItem
	termIdCache.Set(term, termId)
	return termId
}
