package parallel

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"
	"sync"
	"sync/atomic"
)

const (
	// title is the payload line that the writer demos below append repeatedly.
	title = "Life emerges from calamity and sorrow, but death comes from the joyful state of oblivion."
	// c is the number of goroutines each concurrent demo spawns.
	c = 10
)

// 同一个文件，多次打开，多次写入.
func MultiOpenWriteFile(fileName string) {
	wg := sync.WaitGroup{}
	wg.Add(c)
	for i := 0; i < c; i++ {
		go func() { // 将循环变量作为闭包参数传递
			defer wg.Done()
			//由于要被打开多次，所以就使用O_APPEND，而非O_TUNC
			fout, err := os.OpenFile(fileName, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
			if err != nil {
				panic(err)
			}
			defer fout.Close()
			for j := 0; j < 100; j++ {
				fout.WriteString(title)
			}
		}()
	}
	wg.Wait() // 等待所有协程完成
}

// 一次打开，打开后使用协程每次写入
func OneOpenWriteFile(fout *os.File) {
	wg := sync.WaitGroup{}
	wg.Add(c)
	for i := 0; i < c; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < 100; j++ {
				fout.WriteString(title)
			}
		}()
	}
	wg.Wait()
}

// 把一个文件平分成互不相交的 count 份，且每一份都是新的一行，返回每一份开始的位置.
func GetConcurrentReadPosition(infile string, count int) (positions []int64, err error) {
	fin, err := os.Open(infile)
	if err != nil {
		return nil, err
	}

	defer fin.Close()

	stat, err := fin.Stat()

	if err != nil {
		return nil, err
	}

	fileSize := stat.Size()                         // 文件大小
	size := fileSize / int64(count)                 // 切分后每个切片的大小
	dupPositions := make(map[int64]struct{}, count) //使用切片保证起始位置不会重复
	positions = append(positions, 0)                // 第一份肯定是从 0 开始
	dupPositions[0] = struct{}{}

	var i int64
	buffer := make([]byte, 1024) //每次只从文件中读取 1k 内容
	for i = 1; i < int64(count); i++ {
		blockBegin := size * i //定位到大概位置，开始逐个字节查找 \n
		fin.Seek(blockBegin, 0)
		var foundReturn bool // 是否找到换行符
		var pos int64        // 第一个换行符位置
	LOOP:
		for j := 0; j < 10; j++ { // 每次读取 1k 设定为10k之内肯定会有 \n 找不到的话向 positions 里添加元素
			n, err := fin.Read(buffer) // 读取内容
			if err != nil {
				if err == io.EOF {
					break LOOP // 读到文件末尾都没找到
				} else {
					return nil, err
				}
			}
			for i, ele := range buffer[:n] {
				// 从读取的内容中截取 1k 逐字节查找 \n
				if ele == '\n' {
					pos += int64(i)
					foundReturn = true
					break LOOP
				}
			}
			pos += int64(n)
		}

		if foundReturn {
			ele := blockBegin + pos + 1 // 处理后续循环
			if ele >= fileSize {
				break
			}
			if _, exists := dupPositions[ele]; exists {
				break
			} else {
				dupPositions[ele] = struct{}{}
				positions = append(positions, ele)
			}
		} else {
			// 10K 内没找到换行符直接跳过
			return positions, nil
		}
	}
	return positions, nil
}

// processFile reads one file segment line by line and accumulates the number
// extracted from each line into the shared fileCount counter. When readTotal
// is positive, reading stops once readTotal bytes have been consumed (the
// segment boundary); when it is zero, the segment runs to EOF.
func processFile(fin *os.File, readTotal int64) {
	reader := bufio.NewReader(fin)
	var readCnt int64
	for {
		line, err := reader.ReadString('\n')
		if readTotal > 0 {
			readCnt += int64(len(line))
			if readCnt > readTotal {
				// The line that overflows the budget belongs to the next segment.
				break
			}
		}
		if err != nil {
			if err == io.EOF {
				// Last line of the file may lack a trailing '\n'; still count it.
				accumulateLine(line)
			} else {
				fmt.Printf("读文件失败:%v", err)
			}
			break
		}
		accumulateLine(line)
	}
}

// accumulateLine strips the trailing newline from one log line, extracts its
// embedded number via ExtractNumber, and adds non-negative results to the
// shared fileCount counter atomically (segments are processed concurrently).
func accumulateLine(line string) {
	line = strings.TrimRight(line, "\n")
	if len(line) == 0 {
		return
	}
	if n := ExtractNumber(line); n >= 0 {
		atomic.AddInt32(&fileCount, int32(n))
	}
}

// 把一个文件分成concurrent段，并行读取
func ConcurrentReadOneFile(infile string, n int, f func(*os.File, int64)) error {
	positions, err := GetConcurrentReadPosition(infile, n)
	if err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	wg.Add(len(positions))
	for i, position := range positions {
		currPosition := position
		var nextPositoin int64
		if i < len(positions)-1 {
			nextPositoin = positions[i+1]
		} else {
			nextPositoin = -1
		}
		go func(currPosition, nextPositoin int64) {
			defer wg.Done()
			fin, err := os.Open(infile)
			if err != nil {
				fmt.Printf("open file %s failed: %v", infile, err)
				return
			}
			defer fin.Close()
			fin.Seek(currPosition, 0)
			if nextPositoin > currPosition {
				f(fin, nextPositoin-currPosition)
			} else {
				f(fin, 0)
			}
		}(currPosition, nextPositoin)
	}
	wg.Wait()
	return nil
}
