package main

import (
	"flag"
	"fmt"
	"github.com/PuerkitoBio/goquery"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"
)

// wg tracks every page-search and download goroutine so main can wait for all of them.
var wg sync.WaitGroup

// picBean describes one album found on a listing page: its display name,
// how many pictures it contains, and the URL of the first picture
// (subsequent picture URLs follow a numeric pattern derived from it).
type picBean struct {
	name        string
	picCount    int
	firstPicUrl string
}

var (
	// picSavePath is the download root directory; overwritten by the -s flag in main.
	picSavePath                       = "d:/tmp/ps/"
	// picChanMap shards album beans across 10 worker channels (keys 0-9),
	// one per download goroutine.
	picChanMap  map[int]chan *picBean = make(map[int]chan *picBean)
)

func main() {
	// Command-line flags: the tag listing URL and the local save directory.
	mainUrlCMD := flag.String("url", "https://www.meitulu.com/t/leisi/", "图片链接地址页")
	picSavePathCMD := flag.String("s", "d:/tmp/ps_leisi/", "下载保存位置")
	flag.Parse()

	picSavePath = *picSavePathCMD
	mainUrl := *mainUrlCMD
	fmt.Println("下载地址和保存位置是:", mainUrl, picSavePath)

	allPicNum, picPageNum, err := getAllpage(mainUrl)
	if err != nil {
		fmt.Println("获取总标签失败", err)
		return
	}
	fmt.Println("图片数量和图片页码数分别是：", allPicNum, picPageNum)

	// Size the worker channels generously so the page-search producers
	// never block on a full channel.
	if allPicNum < 50 {
		allPicNum = 50
	}
	for i := 0; i < 10; i++ {
		picChanMap[i] = make(chan *picBean, allPicNum/5)
	}

	startTime := time.Now()

	// One goroutine per listing page; page 1 is the bare tag URL,
	// pages 2..N append "<n>.html".
	for i := 1; i <= picPageNum; i++ {
		wg.Add(1)
		pageUrl := mainUrl
		if i > 1 {
			pageUrl = mainUrl + fmt.Sprint(i) + ".html"
		}
		go pageSearch(pageUrl)
	}
	// NOTE(review): crude synchronization — give the page searchers a head
	// start so the channels have data before the downloaders' non-blocking
	// reads begin; each downloader exits as soon as its channel drains.
	time.Sleep(time.Second * 5)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go downPicFromChan(picChanMap[i])
	}

	wg.Wait()
	endTime := time.Now()
	fmt.Println("开始时间|结束时间", startTime.Format("2006-01-02 15:04:05"), endTime.Format("2006-01-02 15:04:05"))
}

//读取管道中的 结构体下载
// downPicFromChan drains its channel of album beans, downloading each one.
// The read is non-blocking: as soon as the channel is momentarily empty the
// worker assumes production has finished and returns (the channels are never
// closed, so a blocking receive would deadlock).
func downPicFromChan(picChan <-chan *picBean) {
	defer wg.Done()
	fmt.Println("下载开始~~")
	for {
		select {
		case bean := <-picChan:
			if bean != nil {
				downPicByPicBean(bean)
			}
		default:
			fmt.Println("管道取不到数据")
			return
		}
	}
}

//搜索每个页面下载
func pageSearch(url string) {
	defer wg.Done()
	resp, err := http.Get(url)
	if err != nil {
		fmt.Println("pageSearch", err)
		return
	}
	defer resp.Body.Close()
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		fmt.Println(url, "err:", err)
		return
	}
	//所以图片li
	doc.Find(".img li a").Each(func(i int, s *goquery.Selection) {
		findImg := s.Find("img")
		picUrl, _ := findImg.Attr("src")
		picName, _ := findImg.Attr("alt")
		//匹配p标签中图片数量|
		text := s.Next().First().Text()
		if strings.Contains(text, "张") {
			// 获取图片数量
			pageCount, done := getInt(text)
			if done {
				return
			}
			picBean := &picBean{name: picName, picCount: pageCount, firstPicUrl: picUrl}
			picChanMap[i%10] <- picBean
		}
	})

	//关闭通道
	for i := 0; i < 10; i++ {
		//close(picChanMap[i])
	}
	log.Println("finished:", url)
}

//从字符中提取数字
func getInt(text string) (int, bool) {
	compile := regexp.MustCompile(`\D*(\d+)\D*`)
	//	查找所有的 //3	使用正则表达式的提取功能|
	match := compile.FindAllStringSubmatch(text, 1) // 返回的是 [][]string
	pageCount, err := strconv.Atoi(match[0][1])
	if err != nil {
		fmt.Println("图片数量匹配错误")
		return 0, true
	}
	return pageCount, false
}

//根据 图片详情下载图片
func downPicByPicBean(bean *picBean) {
	index := strings.LastIndex(bean.firstPicUrl, "/")
	strBaseUrl := bean.firstPicUrl[:index] //文件基本名字(除去最后扩展名)
	picExt := path.Ext(bean.firstPicUrl)   //文件扩展名

	picSaveFile := picSavePath + "/" + bean.name
	_, err := os.Stat(picSaveFile)
	if err != nil {
		os.MkdirAll(picSavePath+"/"+bean.name, os.ModePerm)
	}
	for i := 1; i <= bean.picCount; i++ {
		client := &http.Client{}
		url := strBaseUrl + "/" + fmt.Sprint(i) + picExt
		request, err := http.NewRequest("GET", url, nil)
		//有些网站需要加referer
		//request.Header.Add("referer", bean.firstPicUrl)
		if err != nil {
			fmt.Println("请求失败")
			panic(err)
		}
		//处理请求结果
		response, err := client.Do(request)
		defer response.Body.Close()
		all, err := ioutil.ReadAll(response.Body)
		ioutil.WriteFile(picSaveFile+"/"+path.Base(url), all, 0644)
	}
	//fmt.Println("down finished:", bean)
}

//获取总页数
func getAllpage(url string) (int, int, error) {
	resp, err := http.Get(url)
	if err != nil {
		fmt.Println("获取总页数出现错误", err)
		return 0, 0, err
	}
	defer resp.Body.Close()
	doc, _ := goquery.NewDocumentFromReader(resp.Body)
	//2137条 上一页 1 2 3 4 5 6 7 8 9 10 ..36 下一页
	allPicNum := doc.Find("#pages").Find("a").Eq(0).Text()
	if allPicNum == "" {
		return 1, 1, nil
	}
	allPicInt, done := getInt(allPicNum)
	if done {
		fmt.Println("提取数字失败")
		return 0, 0, err
	}
	if err != nil {
		fmt.Println("获取标签失败", err)
		return 0, 0, err
	}
	//获取总标签页码
	find := doc.Find("#pages").Find("a").Eq(-2).Text()
	fmt.Println(url, "里面总标签页码是:", find)
	page, err := strconv.Atoi(find)
	if err != nil {
		fmt.Println("获取标签失败", err)
		return 0, 0, err
	}
	return allPicInt, page, nil
}
