package crawl

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strings"

	"github.com/gocolly/colly/v2"
)

// OUTPUT_DIR is the directory downloaded trade-detail files are written to.
// NOTE(review): Go convention would name this OutputDir, but renaming the
// exported constant would break external callers.
const OUTPUT_DIR string = "data/"

// Crawling visits the trade-detail page on quotes.money.163.com for each
// given stock number, after making sure the output directory exists.
// The per-page download callback is currently disabled (see the
// commented-out OnHTML handler below).
func Crawling(stocksNumbers ...string) {
	// Instantiate default collector, restricted to the target domain.
	c := colly.NewCollector(
		colly.AllowedDomains("quotes.money.163.com"),
	)

	// Create the output directory; an already-existing directory is fine.
	if err := os.Mkdir(OUTPUT_DIR, os.ModePerm); err != nil && !os.IsExist(err) {
		log.Fatal(err)
	}
	// Mkdir's mode argument is masked by the process umask, so force the
	// intended permissions explicitly.
	if err := os.Chmod(OUTPUT_DIR, os.ModePerm); err != nil {
		log.Println("chmod output dir:", err)
	}

	// On every a element which has href attribute call callback
	// c.OnHTML("#historyData .bd", func(e *colly.HTMLElement) {
	// 	// fmt.Println(e.Text)
	// 	e.ForEach("a", func(i int, h *colly.HTMLElement) {
	// 		// fmt.Println("h.Attr(href)", h.Attr("href"))
	// 		// fmt.Println(h.Request.URL.String())
	// 		filename := strings.Replace(h.Attr("href"), "/", "_", -1)
	// 		strs := strings.Split(filename, "_")
	// 		// filename format: cjmx_datetime_stockID.xls
	// 		filename = "cjmx_" + strs[len(strs)-2] + "_" + strs[len(strs)-1]
	// 		log.Println(filename)
	// 		xlsURL := h.Request.AbsoluteURL(h.Attr("href"))
	// 		// fmt.Println(i, xlsURL)
	// 		downloadURL(xlsURL, OUTPUT_DIR + filename)
	// 	})
	// 	log.Println("Finished downloading", e.Request.URL)
	// })

	// Before making a request print "Visiting ..."
	c.OnRequest(func(r *colly.Request) {
		fmt.Println("Visiting", r.URL.String())
	})

	// Start scraping: one visit per stock number. A failed visit (network
	// error, disallowed domain, ...) is logged and does not abort the
	// remaining stocks.
	for _, stock := range stocksNumbers {
		if err := c.Visit("http://quotes.money.163.com/trade/cjmx_" + stock + ".html"); err != nil {
			log.Println("visit", stock, "error:", err)
		}
	}
}

// downloadURL fetches url via HTTP GET and writes the response body to
// filename, creating parent directories as needed. Responses served with
// an HTML content type are treated as "no data" placeholder pages and
// skipped. Errors are printed and swallowed; the function never panics
// on a failed request.
func downloadURL(url string, filename string) {
	res, err := http.Get(url)
	// Check the transport error BEFORE touching res: on a failed request
	// res is nil and dereferencing it would panic.
	if err != nil {
		fmt.Println("http get error:", err)
		return
	}
	defer res.Body.Close()

	// The site answers with an HTML page instead of an .xls payload when
	// there is no trade data for the requested day — skip those.
	if strings.Contains(res.Header.Get("Content-Type"), "html") {
		fmt.Println("this is empty html")
		return
	}

	// Make sure the target directory exists before creating the file.
	if dir := filepath.Dir(filename); dir != "." {
		if err := os.MkdirAll(dir, os.ModePerm); err != nil {
			fmt.Println("mkdir error:", err)
			return
		}
	}

	f, err := os.Create(filename)
	if err != nil {
		fmt.Println("download err:", err)
		return
	}
	defer f.Close()

	// A short write or broken connection mid-transfer surfaces here.
	if _, err := io.Copy(f, res.Body); err != nil {
		fmt.Println("copy error:", err)
	}
}
