// 1. Collection stage: use a crawler to fetch web pages (the "gather" step of the search engine).
package main

import (
	"SearchEngine/collect/bloomFilter"
	"bufio"
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"
)

var (
	urls             = make(chan []byte, 10) // URLs read from urls.bin; stored as []byte (author's note: string is immutable)
	count int64      = 0                     // total number of pages crawled so far; doubles as the document id
	mutex sync.Mutex                         // lock protecting the shared counter count
)

// 读取待爬取网页,获取url，放入通道urls中
// readUrlsFile tails urls.bin and feeds every line into the urls channel
// (the producer side of the crawl pipeline). When it reaches the current
// end of the file it sleeps for a minute and retries, so URLs appended to
// the file later are still picked up. It returns only when the file cannot
// be opened or a non-EOF read error occurs.
func readUrlsFile() {
	fmt.Println("开始读取urls.bin文件")
	defer func() {
		fmt.Println("终止读取url.bin文件")
	}()
	file, err := os.Open("/home/fcou/go/src/SearchEngine/collect/urls/urls.bin")
	if err != nil {
		fmt.Println("file open error:", err)
		return
	}
	defer file.Close()

	reader := bufio.NewReader(file)
	for {
		line, readErr := reader.ReadString('\n') // read one URL per line
		// ReadString can return data together with io.EOF when the final
		// line has no trailing newline; the previous version discarded
		// that last URL. TrimRight also strips '\r' so CRLF files don't
		// leak a carriage return into the URL.
		if oneURL := strings.TrimRight(line, "\r\n"); oneURL != "" {
			urls <- []byte(oneURL) // producer side of the urls channel
		}
		if readErr == io.EOF {
			// Reached current end of file: wait for more URLs to be
			// appended, then try again (tail -f behavior).
			time.Sleep(time.Minute)
			continue
		}
		if readErr != nil {
			fmt.Println("error:", readErr)
			return
		}
	}
}

// 开始运行爬虫
func runCrawler() {
	fmt.Printf("开始运行爬虫...\n")

	// //启动10个go程，开始爬取网页数据
	// for i := 0; i < 10; i++ {
	CrawlPage()
	// }
}

// 爬取一个网页数据信息
// CrawlPage consumes URLs from the urls channel forever. For each URL it
// skips duplicates via the Bloom filter, fetches the page source, and
// appends the raw page plus its id→URL mapping to the doc files. Designed
// to be safe to run from several goroutines at once.
func CrawlPage() {
	for {
		// Block until the producer delivers the next URL.
		url := string(<-urls)

		// Test-and-set the URL in the Bloom filter under the lock so two
		// workers cannot both claim the same URL. The lock is released
		// before any blocking I/O, so there is no deadlock risk here.
		mutex.Lock()
		seen := bloomFilter.BloomFilter.Find(url)
		if !seen {
			bloomFilter.BloomFilter.Store(url)
		}
		mutex.Unlock()
		if seen {
			continue // already crawled: move on to the next URL
		}
		fmt.Printf("开始爬取%s网页数据信息\n", url)

		// Assign the next document number. count is shared between
		// workers, so the increment must be guarded (data race before).
		mutex.Lock()
		count++
		fileIndex := count
		mutex.Unlock()

		// Fetch the page source. A single failed fetch must not kill the
		// whole crawler loop (the old code returned here).
		SourceCode, err := GetHTMLSourceCode(url)
		if err != nil {
			fmt.Println("GetHTMLSourceCode err:", err)
			continue
		}
		if len(SourceCode) != 0 {
			// Append the full page to the shared doc_raw file.
			rawFileName := "/home/fcou/go/src/SearchEngine/collect/docRaw/doc_raw.bin"
			SaveRawTofile(rawFileName, fileIndex, SourceCode)
			// Append the id→URL mapping to the doc_id file.
			idFileName := "/home/fcou/go/src/SearchEngine/collect/docId/doc_id.bin"
			SaveIdTofile(idFileName, fileIndex, url)
		}
	}
}

// 爬取指定url的网页，访问网页，最终返回网页源代码result
func GetHTMLSourceCode(url string) (result string, err error) {
	//模拟用户访问
	req := buildRequest(url)
	//访问网页
	resp, err1 := http.DefaultClient.Do(req)
	if err != nil {
		err = err1
		return
	}
	defer resp.Body.Close()

	// 循环爬取整页数据
	var code strings.Builder //字符串拼接效率高
	buf := make([]byte, 4096)
	for {
		n, err2 := resp.Body.Read(buf)
		if n == 0 {
			break
		}
		if err2 != nil && err2 != io.EOF {
			err = err2
			return
		}
		code.WriteString(string(buf[:n]))
	}
	result = code.String()
	return
}

//模拟用户访问
func buildRequest(url string) *http.Request {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		fmt.Println(err)
		panic(err)
	}
	// req.Header.Set("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9")
	// req.Header.Set("Accept-Encoding", "")
	// req.Header.Set("Accept-Language", "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7")
	// req.Header.Set("Cache-Control", "max-age=0")
	// req.Header.Set("Connection", "keep-alive")
	// req.Header.Set("Cookie", `bid="MHwSHRj6vNs"; __utmc=30149280; gr_user_id=d0cceb73-bdd4-4a95-bdb7-6390d61b953e; _vwo_uuid_v2=DC78F900C51EC26EE5B2BDF8F0625F0FF|17749040d5e008390ff3ea596e447f19; __gads=ID=92f1c26cd956ec85:T=1578361834:S=ALNI_MauwB34k4gkGIBFKOuwSdJek9HV1w; viewed="2230248_19952400_6756090_26979890_34837875_10593804_23774479_34442131"; douban-fav-remind=1; ll="118282"; __utmc=223695111; __yadk_uid=le51ePJoZPKQ4lSsX11DG4DUDK2AnYii; _ga=GA1.2.1713730617.1577439810; _gid=GA1.2.1947843609.1583162090; UM_distinctid=1709bd074951a-0d7d124de7484a-396c7406-13c680-1709bd074974; Hm_lvt_19fc7b106453f97b6a84d64302f21a04=1583162095; Hm_lpvt_19fc7b106453f97b6a84d64302f21a04=1583162095; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1583162115%2C%22https%3A%2F%2Fwww.google.com%2F%22%5D; _pk_ses.100001.4cf6=*; ap_v=0,6.0; __utma=30149280.1713730617.1577439810.1583154526.1583162116.12; __utmb=30149280.0.10.1583162116; __utmz=30149280.1583162116.12.10.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); __utma=223695111.684028627.1583152805.1583152806.1583162116.3; __utmb=223695111.0.10.1583162116; __utmz=223695111.1583162116.3.3.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); _pk_id.100001.4cf6=9e016b10f837a733.1583152803.2.1583163050.1583154928.`)
	// req.Header.Set("Host", "movie.douban.com")
	// req.Header.Set("Referer", "https://www.google.com/")
	// req.Header.Set("Sec-Fetch-Dest", "document")
	// req.Header.Set("Sec-Fetch-Mode", "navigate")
	// req.Header.Set("Sec-Fetch-Site", "cross-site")
	// req.Header.Set("Sec-Fetch-User", "?1")
	// req.Header.Set("Upgrade-Insecure-Requests", "1")
	// req.Header.Set("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36")
	return req
}

// 将原始网页存储到文件：doc_raw.bin中，多网页存储在一个文件内，用分割符
func SaveRawTofile(filmName string, index int64, Data string) {
	f, err := os.OpenFile(filmName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		fmt.Println("SaveRawTofile:", err)
		return
	}
	defer f.Close()
	//使用 bufio.NewWriter 写入文件
	w := bufio.NewWriter(f)
	fmt.Println("开始保存网页信息到文件")
	//先写入网页编号
	indexString := strconv.Itoa(int(index))
	w.WriteString(indexString + "\t")
	//再写入网页大小
	sizeOfData := strconv.Itoa(int(len(Data)))
	w.WriteString(sizeOfData + "\t")
	//再写入网页内容
	w.WriteString(Data)
	//最后写入分割符
	w.WriteString("\r\n\r\n")
	w.Flush()
}

// 将原始网页链接地址和对应编号存储到文件：doc_id.bin中
func SaveIdTofile(filmName string, index int64, url string) {
	f, err := os.OpenFile(filmName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		fmt.Println("SaveRawTofile:", err)
		return
	}
	defer f.Close()
	//使用 bufio.NewWriter 写入文件
	w := bufio.NewWriter(f)
	//先写入网页编号
	indexString := strconv.Itoa(int(index))
	w.WriteString(indexString + ":")
	//再写入网页链接
	w.WriteString(url + "\r\n")
	w.Flush()
}

//判断文件是否存在  存在返回 true 不存在返回false
func checkFileIsExist(filename string) bool {
	var exist = true
	if _, err := os.Stat(filename); os.IsNotExist(err) {
		exist = false
	}
	return exist
}

// 保存BloomFilter到文件中，持久化存储
func saveBloomFilter() {
	for {
		//打开文件
		bloomFilterFileName := "/home/fcou/go/src/SearchEngine/collect/bloomFilter/bloom_filter.bin"
		var f *os.File
		var err error
		if checkFileIsExist(bloomFilterFileName) { //如果文件存在
			err = os.Remove(bloomFilterFileName) //删除文件
			if err != nil {
				fmt.Println("saveBloomFilter:", err)
				return
			}
		}
		f, err = os.Create(bloomFilterFileName) //创建文件
		if err != nil {
			fmt.Println("saveBloomFilter:", err)
			return
		}

		defer f.Close()
		//使用 bufio.NewWriter 写入文件
		w := bufio.NewWriter(f)
		//持久化存储
		// type BitMap struct {
		// 	bytes []byte // 从0到max，每8个数对应一个byte存储，如果该位存了数字则设置为1
		// 	max   uint32 //可存储的最大值，这里设置为 201326592
		// }
		for _, byte := range bloomFilter.BloomFilter.Bytes {
			w.WriteString(string(byte) + "\t")
		}

		// w.WriteString("max:" + string(bloomFilter.BloomFilter.Max) + "\n")
		w.Flush()
		fmt.Println("已保存BloomFilter到文件中")
		// 休眠10分钟
		time.Sleep(time.Minute * 10)
	}
}

// main wires the pipeline together: one goroutine feeds URLs from disk
// into the channel, one crawls them, and one periodically checkpoints the
// Bloom filter. main itself then just blocks forever.
func main() {
	// Continuously push URLs from urls.bin into the channel buffer.
	go readUrlsFile()
	// Consume the channel and crawl each page.
	go runCrawler()
	// Persist crawled-URL state to bloom_filter.bin every 10 minutes.
	go saveBloomFilter()
	// Block forever without consuming CPU; the previous empty `for {}`
	// busy-loop pinned an entire core.
	select {}
}
