package tools

// Regexp syntax reference: https://tool.oschina.net/uploads/apidocs/jquery/regexp.html
import (
	"bufio"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"regexp"
	"strconv"
	"strings"
	"time"
)

// Package-level sentinel errors and crawl targets.
var (
	ErrorNullURL    = fmt.Errorf("url is null")      // returned by GetProtocol for an empty URL
	ErrorInvalidURL = fmt.Errorf("url is not valid") // returned by GetProtocol when no ':' separator is found
	SourceUrl       = "https://www.fabiaoqing.com/"  // crawl entry point (assigned to Robot.Domain in Start)
	Domain          = "https://www.fabiaoqing.com"   // prefix prepended to scheme-less (relative) image URLs
)

// Robot is a simple page crawler. Domain holds the base URL of the site
// being crawled (set from SourceUrl in Start).
type Robot struct {
	Domain string
}

// getCmdWord reads a word from stdin via StrGetCmd, re-prompting until a
// non-empty string is obtained. Read errors are printed but do not abort
// the loop, matching the original best-effort behavior.
func getCmdWord() string {
	str, err := StrGetCmd()
	if err != nil {
		fmt.Println(err.Error())
	}
	for str == "" {
		fmt.Println("请重新输入：")
		str, err = StrGetCmd()
		// Bug fix: errors raised inside the retry loop were silently
		// dropped; report them like the first attempt does.
		if err != nil {
			fmt.Println(err.Error())
		}
	}
	return str
}

// Start is the crawler entry point: it asks the user for a start and end
// page of the "hot" listing, collects every album link from those list
// pages, de-duplicates them, then extracts and downloads the images of
// each album.
func Start() {
	r := Robot{Domain: SourceUrl}
	r.ShowInfo()

	// expURL matches <a href="..."> links on list pages; expImg matches the
	// lazy-loaded image URL carried in the data-original attribute.
	expURL := `<a\s+href="(.*?)"\s+.*?>`
	expImg := `<img.*?data-original=(?:"|')(.*?)(?:'|").*?>`

	fmt.Println("请输入开始页面【1-961】：")
	start, err := strconv.Atoi(getCmdWord())
	// Bug fix: parse errors were swallowed in an empty if-body, so bad
	// input silently crawled from page 0. Abort with a message instead.
	if err != nil {
		fmt.Println("开始页面无效:", err)
		return
	}
	fmt.Println("请输入结束页面【1-961】：")
	end, err := strconv.Atoi(getCmdWord())
	if err != nil {
		fmt.Println("结束页面无效:", err)
		return
	}

	pageURLs := make([]string, 0)
	for page := start; page <= end; page++ {
		listURL := "https://www.fabiaoqing.com/bqb/lists/type/hot/page/" + strconv.Itoa(page) + ".html"
		pageURLs = append(pageURLs, r.GetPage(listURL, expURL)...)
		// Bug fix: cap() reported the slice capacity, not the number of
		// URLs actually collected; len() is the real count.
		fmt.Println(strconv.Itoa(page), ":捕捉", len(pageURLs))
	}
	pageURLs = RemoveRepByMap(pageURLs)
	fmt.Println(len(pageURLs), pageURLs)

	for _, albumURL := range pageURLs {
		imgs := r.FetchImgByExp(albumURL, expImg)
		fmt.Println(imgs)
		DownloadImg(imgs, "")
	}
}


// createDir creates path (including any missing parents) and reports
// whether it succeeded, logging the error on failure.
func createDir(path string) (success bool) {
	if err := os.MkdirAll(path, os.ModePerm); err != nil {
		fmt.Println("文件夹创建失败：", err)
		return false
	}
	return true
}

// PathExists reports whether path exists on disk. A "not exist" stat
// result is not treated as an error; any other stat failure is returned.
func PathExists(path string) (bool, error) {
	_, statErr := os.Stat(path)
	switch {
	case statErr == nil:
		return true, nil
	case os.IsNotExist(statErr):
		return false, nil
	default:
		return false, statErr
	}
}

// ShowInfo prints a short banner announcing that crawling has started.
func (Robot) ShowInfo() {
	fmt.Print("开始爬取")
}



// ReadFromUser reads one line of user input from stdin, strips whitespace
// via the project's StrRemoveSpace helper, and recursively re-prompts
// until a non-empty string is obtained.
func (r *Robot) ReadFromUser() string {
	// Wrap os.Stdin in a buffered reader (default buffer size).
	in := bufio.NewReader(os.Stdin)
	// ReadLine returns (line []byte, isPrefix bool, err error); isPrefix
	// signals a line too long for the buffer and is deliberately ignored.
	str, _, err := in.ReadLine()
	if err != nil {
		// NOTE(review): on a read error the error text is returned as if it
		// were user input — callers cannot tell the two apart; confirm this
		// is intended.
		return err.Error()
	}
	str = []byte(StrRemoveSpace(string(str)))
	if string(str) == "" {
		fmt.Println("输入无效请再次输入：\r\n")
		return r.ReadFromUser()
	}
	return string(str)
}

// GetPage downloads the list page at url and returns every string matched
// by the first capture group of exp (the album links on that page).
// Transient fetch failures are retried after a short pause.
func (r *Robot) GetPage(url string, exp string) []string {
	// Bug fix: the original recursed into itself on every fetch error,
	// retrying forever and growing the stack without bound. Retry in a
	// loop instead, capped at a fixed number of attempts.
	const maxRetries = 5
	pageURLs := make([]string, 0)
	for attempt := 0; ; attempt++ {
		page, err := r.FetchDataByUrl(url)
		if err == nil {
			pageURLs = append(pageURLs, r.FetchDataByExp(string(page), exp)...)
			break
		}
		fmt.Println("err:", err)
		if attempt >= maxRetries {
			break // give up; return whatever (possibly nothing) was collected
		}
		time.Sleep(500 * time.Millisecond) // back off before retrying
	}
	return pageURLs
}

// FetchImgByExp downloads the page at url and returns the image URLs
// extracted by exp. On a fetch error it logs the error and returns an
// empty slice.
func (r *Robot) FetchImgByExp(url string, exp string) []string {
	html, fetchErr := r.FetchDataByUrl(url)
	if fetchErr != nil {
		fmt.Println(fetchErr)
		return make([]string, 0)
	}
	return r.FetchDataByExp(string(html), exp)
}

// FetchDataByExp applies the regexp exp to data and returns the first
// capture group of every match. Matches without a scheme (per GetHttp)
// are prefixed with the package-level Domain to make them absolute.
func (r *Robot) FetchDataByExp(data string, exp string) []string {
	// MustCompile panics on a malformed pattern — the original's
	// `preg == nil` check was dead code and has been removed.
	preg := regexp.MustCompile(exp)

	results := make([]string, 0)
	for _, match := range preg.FindAllStringSubmatch(data, -1) {
		// Bug fix: the original tested cap(v) >= 2; capacity can exceed
		// length, so len() is the correct guard before indexing match[1].
		if len(match) >= 2 {
			link := match[1]
			if !GetHttp(link) {
				link = Domain + link
			}
			results = append(results, link)
		}
	}
	return results
}

// IsSource reports whether str looks like an image resource, i.e. whether
// it contains one of the recognized image-format suffixes (jpg, png, ...).
func IsSource(str string) bool {
	sourceSuffixes := []string{"bmp", "jpg", "png", "tif", "gif", "webp", "avif"}
	for _, suffix := range sourceSuffixes {
		// Bug fix: the arguments were reversed — strings.Contains(suffix, str)
		// asked whether the suffix contained the input, not the other way
		// around.
		if strings.Contains(str, suffix) {
			return true
		}
	}
	// Bug fix: the fallthrough returned true, making the function vacuously
	// true for every input.
	return false
}

//检测是否为一个资源协议开头
func GetProtocol(url string) (string, error) {
	// golang的判断语句没有括号，一开始挺不适应的
	if url != "" {
		// 获取冒号索引位置
		index := strings.Index(url, ":")
		// 找到了说明URL正确，否则报错
		if index > 0 {
			// 这里需要注意两点：
			// 1. golang字符串的索引原则是左闭右开
			// 2. 协议类型最好进行格式化处理再输出返回，比如这里是“最大化”（大写格式）
			return strings.ToUpper(url[0:index]), nil
		} else {
			// 注意提前定义ErrorInvalidURL类型
			return "", ErrorInvalidURL
		}
	}
	return "", ErrorNullURL
}
// GetHttp reports whether url already carries a scheme, i.e. contains a
// ':' after the first character. Empty strings are treated as absolute
// (true), so no domain prefix is prepended to them.
func GetHttp(url string) bool {
	if url == "" {
		return true
	}
	return strings.Index(url, ":") > 0
}

// FetchDataByUrl downloads url and returns the raw response body. A short
// fixed delay before each request throttles the crawl rate. Non-200
// responses are reported as errors.
func (r *Robot) FetchDataByUrl(url string) ([]byte, error) {
	time.Sleep(400 * time.Millisecond) // crude rate limiting
	resp, err := HttpRequest(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("wrong status code: %d of %s", resp.StatusCode, url)
	}
	return ioutil.ReadAll(resp.Body)
}

// HttpRequest issues a GET to url with browser-like headers so the target
// site serves the same HTML a real browser would see.
func HttpRequest(url string) (*http.Response, error) {
	request, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	request.Header.Add("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8")
	request.Header.Add("Accept-Language", "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3")
	request.Header.Add("Connection", "keep-alive")
	request.Header.Add("User-Agent", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36")

	// Bug fix: the zero-value http.Client has no timeout, so a stalled
	// server could hang the crawler forever.
	client := http.Client{Timeout: 30 * time.Second}
	return client.Do(request)
}
