package outPut

import (
	"bufio"
	"fmt"
	"db2s/ref"
	"io"
	"math"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync"
)

// FileOperate pairs an open file handle with a write-buffer size and a SQL
// dialect tag.
// NOTE(review): this type is not referenced anywhere in this chunk — field
// semantics below are inferred from names only; confirm against callers.
type FileOperate struct {
	File     *os.File // open destination file handle
	BufSize  int      // buffered-writer size in bytes, presumably
	SqlType  string   // SQL dialect tag — semantics defined by callers, TODO confirm
	fileName string   // backing file path — never written in this chunk, TODO confirm
}
// Text reads and writes delimiter-separated text files. Rows are separated
// by RowsSplistCharacter and columns by ColumnSplistCharacter; in both, the
// literal two-character sequence `\n` is treated as a real newline.
type Text struct {
	FileN                 *os.File `json:"filen"`   // open handle AppendWrite writes to
	LogSeq                int64    `json:"logSeq"`  // sequence number used in (currently commented-out) log messages
	BufSize               int      `json:"bufSize"` // buffered-writer size in bytes for AppendWrite
	FileName              string   `json:"fileName"` // path of the file Read/Read1 consume
	SqlType               string   `json:"sqlType"`  // SQL dialect tag — unused in this chunk, TODO confirm
	RowsSplistCharacter   string   `json:"rowsS"`    // row separator
	ColumnSplistCharacter string   `json:"columnS"`  // column separator
	CurryThreads          int      `json:"curry"`    // bound on concurrent chunk workers
	Queue                 int      `json:"queue"`    // row-channel capacity used by Read1
}

// CreateFile is a no-op placeholder: it always reports that no file was
// created, with no error.
func (t Text) CreateFile() (created bool, err error) {
	return
}
// WriteString writes c through bufWriter and flushes it, reporting success.
// It returns false when the write fails, the flush fails, or fewer bytes
// than len(c) were accepted.
func (t Text) WriteString(bufWriter *bufio.Writer, c string) bool {
	wc, err := bufWriter.WriteString(c)
	if err != nil {
		return false
	}
	// A buffered writer only reaches the underlying file on Flush; the
	// previous code checked this error and then ignored it, silently
	// dropping data on flush failure.
	if err = bufWriter.Flush(); err != nil {
		return false
	}
	if wc != len(c) {
		return false
	}
	return true
}
// AppendWrite renders writeString and appends it to t.FileN through a
// buffered writer sized t.BufSize. Supported payloads: string, *string,
// []*string (one row per element), [][]*string (columns joined by
// ColumnSplistCharacter, one row per inner slice), and []any (each element
// handled recursively). A row separator spelled as the literal sequence
// `\n` is emitted as a real newline. Returns false on any write failure;
// unknown payload types are ignored and report true.
func (t Text) AppendWrite(_ string, writeString any) bool {
	if writeString == nil {
		return false
	}
	bufWriter := bufio.NewWriterSize(t.FileN, t.BufSize)
	// rowEnd appends the configured row separator to one rendered row.
	rowEnd := func(row string) string {
		if strings.EqualFold(t.RowsSplistCharacter, "\\n") {
			return fmt.Sprintf("%v\n", row)
		}
		return fmt.Sprintf("%v%v", row, t.RowsSplistCharacter)
	}
	// A type switch replaces the previous fmt.Sprintf(reflect.TypeOf(...))
	// string comparison: same exact-type matching, no reflection, and the
	// value is bound once instead of re-asserted in every case.
	switch v := writeString.(type) {
	case string:
		if !t.WriteString(bufWriter, v) {
			return false
		}
	case *string:
		if !t.WriteString(bufWriter, *v) {
			return false
		}
	case []*string:
		for _, row := range v {
			if !t.WriteString(bufWriter, rowEnd(*row)) {
				return false
			}
		}
	case [][]*string:
		for _, row := range v {
			cols := make([]string, len(row))
			for idx, cell := range row {
				cols[idx] = *cell
			}
			if !t.WriteString(bufWriter, rowEnd(strings.Join(cols, t.ColumnSplistCharacter))) {
				return false
			}
		}
		// Preserve the original finalizer registration on the batch payload.
		ref.SetFinalizer(writeString)
	case []any:
		for _, elem := range v {
			if !t.AppendWrite("", elem) {
				return false
			}
		}
	}
	return true
}
// Read streams the file at t.FileName and returns (typed as any) a channel
// that yields parsed rows. The file is read in pooled chunks; each chunk is
// cut back to the last complete row separator and handed to processChunk,
// while the tail beyond the last separator is carried into the next read.
// The channel is closed once the whole file has been consumed.
//
// NOTE(review): any bytes still held in interceptByte when EOF is reached
// are never emitted — confirm input files always end with a row separator.
func (t Text) Read() any {
	var (
		textContent = make(chan any, 10000)
		currySum    = make(chan struct{}, t.CurryThreads)
	)
	// sync.Pools reuse chunk buffers and strings to reduce GC pressure.
	linesPool := sync.Pool{New: func() interface{} {
		lines := make([]byte, 500*1024)
		return lines
	}}
	stringPool := sync.Pool{New: func() interface{} {
		lines := ""
		return lines
	}}
	go func() {
		file, err := os.Open(t.FileName)
		if err != nil {
			// NOTE(review): an open failure is only logged; the loop below
			// then reads from a nil file and exits via the error branch.
			fmt.Println(err)
		}
		bufReader := bufio.NewReader(file)
		var wg sync.WaitGroup // tracks in-flight chunk processing
		// interceptByte carries the partial row left over from the previous chunk.
		var interceptByte []byte
		for {
			var n int
			var nextUntillNewline []byte
			buf := linesPool.Get().([]byte)
			n, err = bufReader.Read(buf)
			if err != nil && err != io.EOF {
				fmt.Println("-----error:", err)
				break
			}
			buf = buf[:n]
			// Prepend the unfinished row from the previous iteration.
			result := append(interceptByte, buf...)
			interceptByte = []byte{}
			if len(t.RowsSplistCharacter) == 0 {
				// No explicit row separator configured: extend the chunk to
				// the next newline so rows are never split mid-line.
				nextUntillNewline, err = bufReader.ReadBytes('\n') //read entire line
				if err != io.EOF {
					lineStr := string(nextUntillNewline)
					lineStr = strings.Replace(lineStr, "\n", t.RowsSplistCharacter, -1) // replace '\n' with the row separator
					nextUntillNewline = []byte(lineStr)
					// NOTE(review): this extension is appended to buf, not to
					// result, so it never reaches f1 below — confirm intent.
					buf = append(buf, nextUntillNewline...)
				}
			}
			var f1 []byte
			// Cut the chunk back to the last complete row; the remainder
			// becomes the carry-over for the next read.
			if !strings.HasSuffix(string(result), t.RowsSplistCharacter) {
				if n1 := strings.LastIndex(string(result), t.RowsSplistCharacter); n1 != -1 {
					f1 = []byte(string(result)[:n1+len(t.RowsSplistCharacter)])
					interceptByte = []byte(string(result)[n1+len(t.RowsSplistCharacter):])
				}
			} else {
				f1 = result
			}
			wg.Add(1)
			currySum <- struct{}{}
			func() {
				//go func() {
				// Process the chunk. This runs synchronously — the goroutine
				// variant is commented out — so currySum never blocks here.
				t.processChunk(f1, &linesPool, &stringPool, textContent)
				<-currySum
				wg.Done()
			}()
			if err == io.EOF {
				break
			}
		}
		wg.Wait()
		close(textContent)
		if err := file.Close(); err != nil {
			fmt.Println(err)
		}
	}()
	return textContent
}
// stringToPointSlice converts one row of text into its column form. When a
// column separator is configured, the row is split into a []*string — the
// literal sequence `\n` is honoured as a real newline separator, and an
// empty trailing field is dropped. Without a separator, a pointer to the
// whole row is returned.
func (t Text) stringToPointSlice(s string) any {
	if len(t.ColumnSplistCharacter) == 0 {
		return &s
	}
	sep := t.ColumnSplistCharacter
	if strings.EqualFold(sep, "\\n") {
		sep = "\n"
	}
	var fields []*string
	rest := s
	for {
		field, tail, found := strings.Cut(rest, sep)
		if !found {
			// Last segment: keep it only when non-empty, matching the
			// original behaviour of dropping an empty trailing field.
			if field != "" {
				fields = append(fields, &field)
			}
			break
		}
		fields = append(fields, &field)
		rest = tail
	}
	return fields
}
// processChunk splits one chunk of file bytes into rows on the configured
// row separator (the literal sequence `\n` counts as a real newline) and
// sends every non-empty row, converted by stringToPointSlice, to
// textContent. Rows are fanned out to goroutines in groups of 300; the
// function returns only after all groups have finished.
//
// NOTE(review): the value fetched from stringPool is immediately
// overwritten, and the slice handed to linesPool.Put is the caller's freshly
// built chunk rather than a pool-sized buffer — both pools look ineffective
// here; confirm before relying on them for memory reuse.
func (t Text) processChunk(chunk []byte, linesPool *sync.Pool, stringPool *sync.Pool, textContent chan any) {
	var (
		wg2       sync.WaitGroup
		logsSlice []string
	)
	logs := stringPool.Get().(string)
	logs = string(chunk)
	linesPool.Put(chunk)
	startIndex := 0
	// Walk the chunk, slicing out one row per separator occurrence.
	for {
		var index, rowsSplistLength int
		if strings.EqualFold(t.RowsSplistCharacter, "\\n") {
			index = strings.Index(logs[startIndex:], "\n")
			rowsSplistLength = 1
		} else {
			index = strings.Index(logs[startIndex:], t.RowsSplistCharacter)
			rowsSplistLength = len(t.RowsSplistCharacter)
		}
		if index == -1 {
			break
		}
		line := logs[startIndex : startIndex+index]
		logsSlice = append(logsSlice, line)
		startIndex += index + rowsSplistLength
	}
	// Whatever follows the last separator is treated as a final row.
	logsSlice = append(logsSlice, logs[startIndex:])
	stringPool.Put(logs)
	chunkSize := 300
	n := len(logsSlice)
	noOfThread := n / chunkSize
	if n%chunkSize != 0 {
		noOfThread++
	}
	// One goroutine per group of up to chunkSize rows.
	for i := 0; i < (noOfThread); i++ {
		wg2.Add(1)
		go func(s int, e int) {
			defer wg2.Done() // guarantee Wait below cannot deadlock
			for ii := s; ii < e; ii++ {
				texts := logsSlice[ii]
				if len(texts) == 0 {
					continue
				}
				textContent <- t.stringToPointSlice(texts)
			}
		}(i*chunkSize, int(math.Min(float64((i+1)*chunkSize), float64(len(logsSlice)))))
	}
	wg2.Wait()
	logsSlice = nil
}

// Read1 streams the file at t.FileName and returns (typed as any) a channel
// that yields parsed rows. Large pooled buffers (32 MiB) are filled from the
// file; each buffer is cut back to the last complete row separator (the
// literal sequence `\n` counts as a real newline) and processed by a
// goroutine, bounded by t.CurryThreads, while the tail beyond the last
// separator is carried into the next read. The channel is closed once the
// file is exhausted.
func (t Text) Read1() any {
	var (
		textContent = make(chan any, t.Queue)
		currySum    = make(chan struct{}, t.CurryThreads)
	)
	// sync.Pools reuse chunk buffers and strings to reduce GC pressure.
	linesPool := sync.Pool{New: func() interface{} {
		lines := make([]byte, 32*1024*1024)
		return lines
	}}
	stringPool := sync.Pool{New: func() interface{} {
		lines := ""
		return lines
	}}
	go func() {
		file, err := os.Open(t.FileName)
		if err != nil {
			fmt.Println("-----file:", t.FileName, err)
		}
		bufReader := bufio.NewReader(file)
		var wg sync.WaitGroup // tracks in-flight chunk workers
		// interceptByte carries the partial row left over from the previous chunk.
		var interceptByte []byte
		for {
			var n int
			buf := linesPool.Get().([]byte)
			n, err = bufReader.Read(buf)
			if err != nil && err != io.EOF {
				fmt.Println("-----error:", err)
				break
			} else if err == io.EOF {
				break
			}
			buf = buf[:n]
			// Prepend the carry-over so rows split across reads stay whole.
			result := append(interceptByte, buf...)
			var f1 []byte
			var lastIndex, rowsSplistLength int
			if strings.EqualFold(t.RowsSplistCharacter, "\\n") {
				lastIndex = strings.LastIndex(string(result), "\n")
				rowsSplistLength = 1
			} else {
				lastIndex = strings.LastIndex(string(result), t.RowsSplistCharacter)
				rowsSplistLength = len(t.RowsSplistCharacter)
			}
			if lastIndex == -1 && len(result) > 0 {
				// No separator anywhere in this chunk: consume it whole
				// instead of buffering it forever.
				lastIndex = len(result)
				rowsSplistLength = 0
			}
			interceptByte = []byte{}
			f1 = append(f1, result[:lastIndex]...)
			stockBytes := result[lastIndex+rowsSplistLength:]
			if len(stockBytes) > 0 {
				interceptByte = append(interceptByte, stockBytes...)
			}
			wg.Add(1)
			currySum <- struct{}{} // bound the number of concurrent workers
			go func() {
				t.processChunk(f1, &linesPool, &stringPool, textContent)
				<-currySum
				wg.Done()
			}()
		}
		// Bug fix: a final row with no terminating separator used to be left
		// in interceptByte and silently dropped at EOF; emit it here before
		// closing the channel.
		if len(interceptByte) > 0 {
			t.processChunk(interceptByte, &linesPool, &stringPool, textContent)
		}
		wg.Wait()
		close(textContent)
		if err1 := file.Close(); err1 != nil {
			fmt.Println(err1)
			return
		}
	}()
	return textContent
}
// Close is a no-op: Text holds no resources requiring explicit release here.
func (t Text) Close() error {
	return nil
}

// Tex is a sheet-aware text writer/reader: one default file (fin, opened by
// Init) plus optional per-sheet files (manyFin, parallel to SheetName,
// filled by Head). CSymbol and RSymbol are the column and row separators;
// package-level defaults apply when they are empty, and the literal
// sequence `\n` in RSymbol means a real newline.
type Tex struct {
	FileName              string     // default input/output file path
	Action                string     // NOTE(review): unused in this chunk — TODO confirm
	fin                   *os.File   // default file handle, opened by Init
	manyFin               []*os.File // per-sheet handles from Head; slots may stay nil
	Truncate              bool       // when set, Open truncates existing files
	Threads               int        // concurrency bound used by Read
	Queue                 int        // row-channel capacity used by Read
	SheetName             []string   // registered sheet names
	CSymbol               string     // column separator ("" -> package default CSymbol)
	RSymbol               string     // row separator ("" -> package default RSymbol)
	Safety                bool       // NOTE(review): unused in this chunk — TODO confirm
	Mode                  string     // NOTE(review): unused in this chunk — TODO confirm
	Enroll                []any      // structs Head scans for SheetName/Persistence fields
	password              string     // exposed via GetPassword
	linesPool, stringPool sync.Pool  // buffer/string pools, see linesP/stringP
}

// Init opens t.FileName (honouring t.Truncate) as the default output file
// and applies the default queue size (1000) and worker count (1).
func (t *Tex) Init() (err error) {
	var handle any
	handle, err = t.Open(t.FileName)
	if err != nil {
		return err
	}
	t.fin = handle.(*os.File)
	t.Queue = 1000
	t.Threads = 1
	return
}
// GetPassword returns the unexported password configured on this Tex.
// NOTE(review): nothing in this chunk ever sets password — confirm where it
// is assigned.
func (t *Tex) GetPassword() string {
	return t.password
}
// Head opens one persistence file per registered sheet. For every name in
// t.SheetName it scans the structs in t.Enroll via reflection, taking the
// json tag of a string field named "SheetName" and the runtime value of a
// string field named "Persistence"; when the tag matches the sheet name,
// the file "<persistence>/<sheet>.data" is (re)created and stored in
// t.manyFin at the sheet's index.
//
// NOTE(review): sheets with no matching Enroll entry leave a nil slot in
// t.manyFin — confirm downstream code (Close/Write) tolerates that.
func (t *Tex) Head() (err error) {
	t.manyFin = make([]*os.File, len(t.SheetName))
	for kk, vv := range t.SheetName {
		for _, v := range t.Enroll {
			val := reflect.ValueOf(v)
			tType := val.Type()
			var sheetName string
			var persistence string
			for i := 0; i < tType.NumField(); i++ {
				switch tType.Field(i).Type.Kind() {
				case reflect.Struct:
				case reflect.String:
					switch tType.Field(i).Name {
					case "SheetName":
						// The sheet's public name lives in the field's json tag.
						sheetName = val.Type().Field(i).Tag.Get("json")
					case "Persistence":
						// The target directory is the field's runtime value.
						persistence = val.Field(i).String()
					}
				default:

				}
			}
			if strings.EqualFold(vv, sheetName) {
				var fin *os.File
				// O_TRUNC discards any previous contents of the sheet file.
				if fin, err = os.OpenFile(fmt.Sprintf("%v/%v.data", persistence, sheetName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC|os.O_APPEND, 0666); err != nil {
					return err
				}
				t.manyFin[kk] = fin
			}
		}
	}
	return
}
// Open opens fileName for appending writes, creating it when absent; when
// t.Truncate is set, existing contents are discarded first. The handle is
// returned as any (concretely an *os.File).
func (t *Tex) Open(fileName string) (res any, err error) {
	flags := os.O_WRONLY | os.O_CREATE | os.O_APPEND
	if t.Truncate {
		flags |= os.O_TRUNC
	}
	res, err = os.OpenFile(fileName, flags, 0666)
	return
}
// Close closes every per-sheet file and then the default file, returning
// the first close error. Handles that were never opened are skipped:
// Head only fills manyFin slots whose sheet matched an Enroll entry, so nil
// entries are expected — previously closing them returned a spurious
// os.ErrInvalid.
func (t *Tex) Close() (err error) {
	for _, v := range append(t.manyFin, t.fin) {
		if v == nil {
			continue
		}
		if err = v.Close(); err != nil {
			return
		}
	}
	return
}
// linesP (re)initializes the pool of 32 MiB byte buffers used to amortize
// chunk allocations across reads.
func (t *Tex) linesP() {
	t.linesPool = sync.Pool{New: func() interface{} {
		lines := make([]byte, 32*1024*1024)
		return lines
	}}
}
// stringP (re)initializes the string pool.
// NOTE(review): pooling immutable empty strings has no reuse benefit —
// confirm whether this pool is actually needed.
func (t *Tex) stringP() {
	t.stringPool = sync.Pool{New: func() interface{} {
		lines := ""
		return lines
	}}
}
// errQuit reports whether a read loop should stop: any non-nil error —
// including io.EOF — ends the loop.
func errQuit(err error) bool {
	// The original if/else chain returned true for both io.EOF and real
	// errors, which collapses to a single nil check.
	return err != nil
}
// stringToPointSlice converts one row of text into its column form. When a
// column symbol is configured, the row is split into a []*string — the
// literal sequence `\n` is honoured as a real newline separator, and an
// empty trailing field is dropped. Without a separator, a pointer to the
// whole row is returned.
func (t Tex) stringToPointSlice(s string) any {
	if len(t.CSymbol) == 0 {
		return &s
	}
	sep := t.CSymbol
	if strings.EqualFold(sep, "\\n") {
		sep = "\n"
	}
	var fields []*string
	rest := s
	for {
		field, tail, found := strings.Cut(rest, sep)
		if !found {
			// Last segment: keep only when non-empty, matching the original
			// behaviour of dropping an empty trailing field.
			if field != "" {
				fields = append(fields, &field)
			}
			break
		}
		fields = append(fields, &field)
		rest = tail
	}
	return fields
}
// processChunk splits one chunk of file bytes into rows on t.RSymbol (the
// literal sequence `\n` counts as a real newline) and sends every non-empty
// row, converted by stringToPointSlice, to textContent. Rows are fanned out
// to goroutines in groups of 300; the function returns only after all
// groups have finished.
//
// NOTE(review): the value fetched from stringPool is immediately
// overwritten, and the slice handed to linesPool.Put is the caller's freshly
// built chunk rather than a pool-sized buffer — both pools look ineffective
// here; confirm before relying on them for memory reuse.
func (t Tex) processChunk(chunk []byte, linesPool *sync.Pool, stringPool *sync.Pool, textContent chan any) {
	var (
		wg2       sync.WaitGroup
		logsSlice []string
	)
	logs := stringPool.Get().(string)
	logs = string(chunk)
	linesPool.Put(chunk)
	startIndex := 0
	// Walk the chunk, slicing out one row per separator occurrence.
	for {
		var index, rowsSplitLength int
		if strings.EqualFold(t.RSymbol, "\\n") {
			index = strings.Index(logs[startIndex:], "\n")
			rowsSplitLength = 1
		} else {
			index = strings.Index(logs[startIndex:], t.RSymbol)
			rowsSplitLength = len(t.RSymbol)
		}
		if index == -1 {
			break
		}
		line := logs[startIndex : startIndex+index]
		logsSlice = append(logsSlice, line)
		startIndex += index + rowsSplitLength
	}
	// Whatever follows the last separator is treated as a final row.
	logsSlice = append(logsSlice, logs[startIndex:])
	stringPool.Put(logs)
	chunkSize := 300
	n := len(logsSlice)
	noOfThread := n / chunkSize
	if n%chunkSize != 0 {
		noOfThread++
	}
	// One goroutine per group of up to chunkSize rows.
	for i := 0; i < (noOfThread); i++ {
		wg2.Add(1)
		go func(s int, e int) {
			defer wg2.Done() // guarantee Wait below cannot deadlock
			for ii := s; ii < e; ii++ {
				texts := logsSlice[ii]
				if len(texts) == 0 {
					continue
				}
				textContent <- t.stringToPointSlice(texts)
			}
		}(i*chunkSize, int(math.Min(float64((i+1)*chunkSize), float64(len(logsSlice)))))
	}
	wg2.Wait()
	logsSlice = nil
}
// Read streams t.FileName and returns a channel (typed any) of parsed rows,
// or an error when the file cannot be opened. Pooled 32 MiB buffers are
// filled from the file, cut back to the last complete row separator (the
// literal sequence `\n` counts as a real newline), and split into rows by
// processChunk; the tail beyond the last separator is carried into the next
// read. The channel is closed when the file is exhausted.
//
// NOTE(review): bytes still held in interceptByte at EOF are dropped —
// confirm inputs always end with a separator. The named result err is also
// reassigned inside the goroutine after Read has returned; callers never
// observe those assignments.
func (t *Tex) Read() (res any, err error) {
	var (
		textContent = make(chan any, t.Queue)
		currySum    = make(chan struct{}, t.Threads)
	)
	// sync.Pools reuse chunk buffers and strings to reduce GC pressure.
	linesPool := sync.Pool{New: func() interface{} {
		lines := make([]byte, 32*1024*1024)
		return lines
	}}
	stringPool := sync.Pool{New: func() interface{} {
		lines := ""
		return lines
	}}
	var file *os.File
	if file, err = os.Open(t.FileName); err != nil {
		return
	}
	go func() {
		bufReader := bufio.NewReader(file)
		var wg sync.WaitGroup // tracks in-flight chunk processing
		// interceptByte carries the partial row left over from the previous chunk.
		var interceptByte []byte
		for {
			var n int
			buf := linesPool.Get().([]byte)
			n, err = bufReader.Read(buf)
			if err != nil && err != io.EOF {
				break
			} else if err == io.EOF {
				break
			}
			buf = buf[:n]
			// Prepend the carry-over so rows split across reads stay whole.
			result := append(interceptByte, buf...)
			var f1 []byte
			var lastIndex, rowsSplitLength int
			if strings.EqualFold(t.RSymbol, "\\n") {
				lastIndex = strings.LastIndex(string(result), "\n")
				rowsSplitLength = 1
			} else {
				lastIndex = strings.LastIndex(string(result), t.RSymbol)
				rowsSplitLength = len(t.RSymbol)
			}
			if lastIndex == -1 && len(result) > 0 {
				// No separator anywhere in the chunk: consume it whole.
				lastIndex = len(result)
				rowsSplitLength = 0
			}
			interceptByte = []byte{}
			f1 = append(f1, result[:lastIndex]...)
			stockBytes := result[lastIndex+rowsSplitLength:]
			if len(stockBytes) > 0 {
				interceptByte = append(interceptByte, stockBytes...)
			}
			wg.Add(1)
			currySum <- struct{}{}
			// NOTE(review): this closure runs synchronously (no `go`), so
			// currySum never actually limits concurrency here.
			func() {
				t.processChunk(f1, &linesPool, &stringPool, textContent)
				<-currySum
				wg.Done()
			}()
		}
		wg.Wait()
		close(textContent)
		if err1 := file.Close(); err1 != nil {
			fmt.Println(err1)
			return
		}
	}()
	return textContent, nil
}
// Create is a no-op placeholder; it always returns nil. File creation is
// performed by Init/Head instead.
func (t *Tex) Create() (err error) {
	return
}
// Print exposes selected metadata by key: FName yields the configured file
// name; any other key yields nil.
func (t *Tex) Print(s Print) (r any) {
	if s == FName {
		r = t.FileName
	}
	return
}
// WriteString writes c through bufWriter and flushes it to the underlying
// file. It returns the write error, the flush error, or io.ErrShortWrite
// when fewer bytes than len(c) were accepted without an explicit error —
// previously that case silently returned nil.
func (t *Tex) WriteString(bufWriter *bufio.Writer, c string) (err error) {
	var wc int
	wc, err = bufWriter.WriteString(c)
	if err != nil {
		return
	}
	if err = bufWriter.Flush(); err != nil {
		return
	}
	if wc != len(c) {
		// Defensive: bufio reports an error on short writes itself, but if
		// it ever does not, surface the failure instead of claiming success.
		return io.ErrShortWrite
	}
	return
}
// Write renders writeS and appends it to the file registered for sheetName,
// falling back to the default file when the sheet is unknown (and silently
// doing nothing when no file is open at all). Supported payloads: string,
// *string, []*string and []string (one row per element), [][]*string
// (columns joined by the column symbol, one row per inner slice), and []any
// (handled recursively). Empty t.CSymbol/t.RSymbol fall back to the package
// defaults CSymbol/RSymbol; a row separator spelled as the literal sequence
// `\n` is emitted as a real newline. Unknown payload types are ignored.
func (t *Tex) Write(sheetName string, writeS any) (err error) {
	var fin *os.File
	var cSymbol, rSymbol string
	if writeS == nil {
		return
	}
	// Resolve separators, falling back to the package-level defaults.
	if len(t.CSymbol) == 0 {
		cSymbol = CSymbol
	} else {
		cSymbol = t.CSymbol
	}
	if len(t.RSymbol) == 0 {
		rSymbol = RSymbol
	} else {
		rSymbol = t.RSymbol
	}
	// Prefer the per-sheet file opened by Head, if one matches.
	for k, v := range t.SheetName {
		if strings.EqualFold(v, sheetName) {
			fin = t.manyFin[k]
			break
		}
	}
	if fin == nil {
		if t.fin == nil {
			// Nowhere to write; keep the historical silent no-op.
			return
		}
		fin = t.fin
	}
	bufWriter := bufio.NewWriterSize(fin, 1024)
	// endRow appends the row separator to one rendered row.
	endRow := func(row string) string {
		if strings.EqualFold(rSymbol, "\\n") {
			return fmt.Sprintf("%v\n", row)
		}
		return fmt.Sprintf("%v%v", row, rSymbol)
	}
	// A binding type switch replaces the previous switch writeS.(type) with
	// repeated assertions in every case.
	switch v := writeS.(type) {
	case string:
		if err = t.WriteString(bufWriter, v); err != nil {
			return
		}
	case *string:
		if err = t.WriteString(bufWriter, *v); err != nil {
			return
		}
	case []*string:
		for _, row := range v {
			if err = t.WriteString(bufWriter, endRow(*row)); err != nil {
				return
			}
		}
	case []string:
		for _, row := range v {
			if err = t.WriteString(bufWriter, endRow(row)); err != nil {
				return
			}
		}
	case [][]*string:
		for _, row := range v {
			cols := make([]string, len(row))
			for idx, cell := range row {
				cols[idx] = *cell
				// Clear any finalizer registered on the cell pointer, as the
				// original code did.
				runtime.SetFinalizer(cell, nil)
			}
			if err = t.WriteString(bufWriter, endRow(strings.Join(cols, cSymbol))); err != nil {
				return
			}
		}
	case []interface{}:
		for _, elem := range v {
			if err = t.Write(sheetName, elem); err != nil {
				return
			}
		}
	}
	return
}
// Delete is a no-op placeholder; the File argument is ignored and the
// result is always nil.
func (t *Tex) Delete(File any) (err error) {
	return
}
// Rename is a no-op placeholder; the File argument is ignored and the
// result is always nil.
func (t *Tex) Rename(File any) (err error) {
	return
}

// NewTex returns a zero-valued Tex, to be configured via its exported
// fields and then prepared with Init.
func NewTex() *Tex {
	return new(Tex)
}
