package main

import (
	"log"
	"os"
	"strconv"
	"sync"
	"time"

	"github.com/gocolly/colly"
)

// logger writes crawl output to a per-run log file.
var logger *log.Logger

// targetPage holds the link whose anchor text is "更多地点" ("more places"),
// discovered during the first crawl. Guarded by targetMu because OnHTML
// callbacks run concurrently when the collector is async.
var targetPage string
var targetMu sync.Mutex

// main crawls a Google search results page, logs every anchor it finds,
// and then crawls the "更多地点" page if one was discovered.
func main() {
	// One log file per run, suffixed with a nanosecond timestamp.
	logfile := "output.log." + strconv.FormatInt(time.Now().UnixNano(), 10)
	file, err := os.OpenFile(logfile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		// Include the underlying error so the failure cause is visible.
		log.Fatalln("fail to create file!", err)
	}
	defer file.Close()
	logger = log.New(file, "", log.LstdFlags)
	logger.SetPrefix("[Test] ") // set the log prefix
	logger.SetFlags(log.LstdFlags | log.Lshortfile)

	// Instantiate default collector
	c := colly.NewCollector(
		// MaxDepth is 2, so only the links on the scraped page
		// and links on those pages are visited
		colly.MaxDepth(2),
		colly.Async(true),
	)

	// Limit the maximum parallelism to 2.
	//
	// NOTE: DomainGlob must be a domain pattern, not a full URL. The
	// previous value "https://www.google.com.hk/" never matched any
	// request domain, so the limit rule was silently inert.
	if err := c.Limit(&colly.LimitRule{DomainGlob: "*google.*", Parallelism: 2}); err != nil {
		log.Fatalln("failed to set limit rule:", err)
	}

	c.OnRequest(func(r *colly.Request) {
		// Present a desktop Chrome UA so the server returns a normal page.
		r.Headers.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36")
		r.Headers.Set("accept-language", "zh-CN,zh;q=0.9,en;q=0.8")
	})

	// On every a element which has href attribute call callback
	c.OnHTML("a[href]", func(e *colly.HTMLElement) {
		link := e.Attr("href")
		title := e.Text

		// Log the link to both the file logger and stderr.
		logger.Println(title, link)
		log.Println(title, link)

		// Remember the "more places" page for a follow-up crawl.
		// Mutex-guarded: async collectors invoke callbacks from
		// multiple goroutines, so a bare write is a data race.
		if title == "更多地点" {
			targetMu.Lock()
			targetPage = link
			targetMu.Unlock()
		}
		// Visit the link found on the page. Errors such as "already
		// visited" or "max depth reached" are expected and ignored.
		_ = e.Request.Visit(link)
	})

	// Start scraping from the Google search results page.
	if err := c.Visit("https://www.google.com.hk/search?q=vape+liquid+USA+stores&oq=vape+liquid+USA+stores"); err != nil {
		logger.Println("initial visit failed:", err)
	}
	// Wait until all async requests are finished
	c.Wait()

	// Crawl the "more places" page if the first pass found one.
	targetMu.Lock()
	page := targetPage
	targetMu.Unlock()
	if page != "" {
		if err := c.Visit(page); err != nil {
			logger.Println("target page visit failed:", err)
		}
	}

	c.Wait()
}
