package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"
)

var (
	//\d代表数字
	reQQEmail = `(\d+)@qq.com`
	//匹配邮箱
	reEmail = `\w+@\w+\.\w+(\.\w+)?`
	//链接
	reLink  = `href="(https?://[\s\S]+?)"`
	rePhone = `1[3456789]\d\s?\d{4}\s?\d{4}`
	//410222 1987 06 13 4038
	reIdcard = `[12345678]\d{5}((19\d{2})|(20[01]))((0[1-9]|[1[012]]))((0[1-9])|[12]\d|[3[01]])\d{3}[\dXx]`
	reImg    = `"(http[s]??://.+\.(jpg|png))"`
)

//|(gif)|(ico)
var (
	// imageUrls collects every image link found by the crawler goroutines.
	imageUrls []string
	// imageUrlsMu guards imageUrls: several GetImgUrls goroutines append
	// to it concurrently (previously an unsynchronized data race).
	imageUrlsMu sync.Mutex
	// wg tracks both the crawler and the downloader goroutines.
	wg sync.WaitGroup
)

func main() {

	// myTest()
	// TestDownloadImg()
	// return

	//1.初始化数据通道: chanTask is a token channel whose buffer bounds
	// the number of simultaneous downloads to 5.
	imageUrls = []string{}
	chanTask := make(chan bool, 5)

	//2.爬虫协程: crawl the first two listing pages concurrently; each
	// goroutine appends the image URLs it finds to imageUrls.
	for i := 1; i <= 2; i++ {
		wg.Add(1)
		//获取某个页面所有图片链接
		go GetImgUrls("https://studygolang.com/books?p=" + strconv.Itoa(i))
	}

	wg.Wait()

	fmt.Printf("----总共爬取了%d张图片\n", len(imageUrls))

	// Download every collected image; chanTask limits concurrency.
	for i := 0; i < len(imageUrls); i++ {
		wg.Add(1)
		//同时并发下载5个
		go DownloadImg(chanTask, i, imageUrls[i])
	}

	wg.Wait()
}

// GetImgUrls crawls one listing page and appends every image link it
// finds to the shared imageUrls slice. Safe for concurrent use.
func GetImgUrls(pageURL string) {
	// defer so the WaitGroup is released even if FindImgsUrls panics.
	defer wg.Done()

	urls := FindImgsUrls(pageURL)

	// imageUrls is appended to by several goroutines; guard the append.
	imageUrlsMu.Lock()
	imageUrls = append(imageUrls, urls...)
	imageUrlsMu.Unlock()
}

// FindImgsUrls downloads the page at url and returns every image link
// (jpg/png) captured by the reImg pattern, printing each one as it goes.
func FindImgsUrls(url string) (urls []string) {
	// Fetch the raw HTML of the page.
	pageStr := GetPageStr(url)

	// Collect the first capture group of each reImg match.
	matches := regexp.MustCompile(reImg).FindAllStringSubmatch(pageStr, -1)
	fmt.Printf("找到%d条结果:\n", len(matches))

	for _, match := range matches {
		fmt.Println(match[1])
		urls = append(urls, match[1])
	}
	return
}

// GetPageStr fetches url and returns the response body as a string.
// Errors are logged via HandleError and yield an empty string.
func GetPageStr(url string) (pageStr string) {
	// Request the HTML page.
	res, err := http.Get(url)
	if err != nil {
		HandleError(err, "http.Get url")
		// res is nil when http.Get fails; the deferred Close and the
		// body read below would panic, so bail out early.
		return ""
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		fmt.Printf("status code error: %d %s", res.StatusCode, res.Status)
	}

	//接收页面
	pageBytes, err := ioutil.ReadAll(res.Body)
	if err != nil {
		HandleError(err, "ioutil.ReadAll")
		return ""
	}
	return string(pageBytes)
}

// GetImg fetches the page at url, dumps its HTML, then prints every
// image match found by the reImg pattern (submatch slice and full match).
func GetImg(url string) {
	content := GetPageStr(url)
	fmt.Println(content)

	pattern := regexp.MustCompile(reImg)
	matches := pattern.FindAllStringSubmatch(content, -1)
	fmt.Printf("找到%d条结果:\n", len(matches))

	for _, m := range matches {
		fmt.Println(m)
		fmt.Println(m[0])
	}
}

// DownloadFile downloads url and writes the response body to filename.
// It reports whether the whole download-and-write succeeded; every
// failure is logged via HandleError.
func DownloadFile(url string, filename string) (ok bool) {
	//发请求
	resp, err := http.Get(url)
	if err != nil {
		HandleError(err, "http.Get")
		return false
	}
	//关闭资源
	defer resp.Body.Close()

	//读取响应内容
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		HandleError(err, "ioutil resp.Body")
		// Previously a read error was ignored: nil data was written to
		// disk and the function still returned true. Fail instead.
		return false
	}

	//写入硬盘 (0666: world read/write before umask)
	err = ioutil.WriteFile(filename, data, 0666)
	if err != nil {
		HandleError(err, "http.GetWrite")
		return false
	}
	return true
}

//处理异常
func HandleError(err error, why string) {
	if err != nil {
		fmt.Println(why, err)
	}
}

// DownloadImg downloads image #index from url, using task as a
// concurrency-limiting token channel, and prints the elapsed time.
func DownloadImg(task chan bool, index int, url string) {

	defer wg.Done()

	// Acquire a slot; the channel's buffer bounds concurrent downloads.
	task <- true
	// Release the slot on return (also fires if DownloadFile panics).
	defer func() { <-task }()

	start := time.Now()
	fmt.Printf("开始下载第%d张图片\n", index)

	//得到全路径
	filename := GetFilenameFromUrl(url, "./images/")
	//保存到硬盘
	ok := DownloadFile(url, filename)

	// time.Since is the idiomatic replacement for UnixNano()/1e6 math.
	elapsed := time.Since(start).Milliseconds()
	if ok {
		fmt.Printf("第%d张图片下载成功，耗时%d毫秒\n", index, elapsed)
	} else {
		fmt.Printf("第%d张图片下载失败\n", index)
	}
}

//拼接文件名
func GetFilenameFromUrl(url string, dirPath string) (filename string) {
	//strings包的方法，截取最后一个/
	lastIndex := strings.LastIndex(url, "/")
	filename = url[lastIndex+1:]
	//加一个时间戳，防止重名
	timePrefix := strconv.Itoa(int(time.Now().UnixNano()))
	filename = timePrefix + "_" + filename
	filename = dirPath + filename
	return
}

// myTest is a manual smoke test: fetch one page, dump its raw HTML, then
// print the image links found on the same page.
func myTest() {
	//1.获取页面内容 and dump it
	fmt.Println(GetPageStr("https://m.meizitu.com/"))
	//2.获取图片链接
	GetImg("https://m.meizitu.com/")
}

// TestDownloadImg is a manual check that a single known image URL
// downloads successfully to the current directory.
func TestDownloadImg() {
	if DownloadFile("http://i1.whymtj.com/uploads/tu/201903/9999/rne35bbd2303.jpg", "1.jpg") {
		fmt.Println("下载成功")
	} else {
		fmt.Println("下载失败")
	}
}
