package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"regexp"
	"strconv"
	"strings"
	"time"
)

var (
	// chanImageUrls buffers image URLs waiting to be downloaded.
	chanImageUrls chan string
	// chanURL buffers gallery-page URLs waiting to be scraped.
	chanURL chan string
	// reImg matches absolute image URLs ending in a common image
	// extension. (The original comment here, "used to monitor
	// goroutines", appears to be a leftover from removed code.)
	reImg = `https?://[^"]+?(\.((jpg)|(png)|(jpeg)|(gif)|(bmp)))`
	// saveDir is the directory downloaded images are written to.
	saveDir = "./pic/"
)

// init ensures the image output directory exists before any worker
// tries to write into it.
func init() {
	// Bug fix: the original discarded this error; a failed mkdir would
	// make every subsequent WriteFile fail with a confusing message.
	if err := os.MkdirAll(saveDir, 0755); err != nil {
		log.Fatalf("creating save dir %q: %v", saveDir, err)
	}
}

// main wires up the crawl pipeline: one URL producer, a pool of page
// scrapers, and a pool of image downloaders, then blocks forever.
func main() {
	// Channels connecting the pipeline stages.
	chanImageUrls = make(chan string, 1000000)
	chanURL = make(chan string, 10000)

	// Producer: enqueue gallery pages 1..99.
	go func() {
		for page := 1; page < 100; page++ {
			chanURL <- "https://www.bizhizu.cn/wallpaper/" + strconv.Itoa(page) + ".html"
		}
	}()

	// Scraper pool: pull pages and extract image links.
	for w := 0; w < 20; w++ {
		go getImgUrls()
	}
	// Downloader pool: pull image links and fetch them.
	for w := 0; w < 20; w++ {
		go DownloadImg()
	}

	// Park the main goroutine; workers run until the process is killed.
	select {}
}

// 下载图片，传入的是图片叫什么
func downloadFile(url string, filename string) (ok bool) {
	resp, err := http.Get(url)
	if err != nil {
		log.Println("err:", err)
		return false
	}
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	filename = saveDir + filename
	// 写数据
	err = ioutil.WriteFile(filename, bytes, 0666)
	if err != nil {
		return false
	} else {
		return true
	}
}

// 下载图片
func DownloadImg() {
	for url := range chanImageUrls {
		fmt.Println("剩余:", len(chanImageUrls))
		filename := GetFilenameFromUrl(url)
		ok := downloadFile(url, filename)
		if ok {
			fmt.Printf("%s 下载成功\n", filename)
		} else {
			fmt.Printf("%s 下载失败\n", filename)
		}
	}
}

// 截取url名字
func GetFilenameFromUrl(url string) (filename string) {
	// 返回最后一个/的位置
	lastIndex := strings.LastIndex(url, "/")
	// 切出来
	filename = url[lastIndex+1:]
	// 时间戳解决重名
	timePrefix := strconv.Itoa(int(time.Now().UnixNano()))
	filename = timePrefix + "_" + filename
	return
}

// 爬图片链接到管道
// url是传的整页链接
func getImgUrls() {
	for url := range chanURL {
		fmt.Println(url)
		urls := getImgs(url)
		// 遍历切片里所有链接，存入数据管道
		for _, url := range urls {
			chanImageUrls <- url
		}
	}
}

// 获取当前页图片链接
func getImgs(url string) (urls []string) {
	pageStr := GetPageStr(url)
	re := regexp.MustCompile(reImg)
	results := re.FindAllStringSubmatch(pageStr, -1)
	fmt.Printf("共找到%d条结果\n", len(results))
	for _, result := range results {
		url := result[0]
		urls = append(urls, url)
	}
	return
}

// 抽取根据url获取内容
func GetPageStr(url string) (pageStr string) {
	resp, _ := http.Get(url)

	defer resp.Body.Close()

	// 2.读取页面内容
	pageBytes, _ := ioutil.ReadAll(resp.Body)

	// 字节转字符串
	pageStr = string(pageBytes)
	return pageStr
}