package service

import (
	"bytes"
	"errors"
	"fmt"
	"net"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"time"

	"gitee.com/gomod/utils/logger"
	"github.com/PuerkitoBio/goquery"
	"github.com/gocolly/colly"
	_ "github.com/gocolly/colly/extensions"
)

// Xue600Page is the summary of one article card scraped from a
// 600xue.com listing page (see scrapeData).
type Xue600Page struct {
	Id        string // post id: second-to-last path segment of the article permalink
	Title     string // article title text
	Image     string // cover image URL (lazy-load "data-src" attribute)
	Time      string // publication time ("datetime" attribute of the <time> element)
	Categorys map[string]string // category slug -> display name (sic: field name is part of the frozen interface)
}

// Xue600Detail holds the data scraped from one product detail page
// (see scrapeContentPriceData).
type Xue600Detail struct {
	Id      string // post id, set by the caller from the requested id
	Common  string // first ".pricing__opt" text — presumably the regular price; TODO confirm
	Vip     string // second ".pricing__opt" text — presumably the VIP price; TODO confirm
	VipAll  string // third ".pricing__opt" text — presumably the all-VIP price; TODO confirm
	Sold    string // sales figure from the ".list-paybody" widget
	Content string // rendered HTML of ".entry-content"
	Pwd     string // extraction password from the "#refurl" element, if present
}

// Xue600Download is the resolved download link for one product
// (see Crawler600XueDownload). Url is "0" when the download quota
// was exceeded.
type Xue600Download struct {
	Id  string
	Url string
}

// Crawler600XueTotalPage fetches the 600xue.com home page and returns the
// total number of listing pages found in its pagination widget. It returns
// 0 when the request or the parse fails without a fallback.
func Crawler600XueTotalPage() (t int) {
	collector := getNewCollector()

	// Send browser-like headers with every request.
	collector.OnRequest(func(r *colly.Request) {
		r.Headers = getHeader()
	})

	// On a successful response, pull the page count out of the HTML.
	collector.OnResponse(func(r *colly.Response) {
		if r.StatusCode != http.StatusOK {
			logger.Info.Println("Login failed with status code:", r.StatusCode)
			return
		}
		total, err := scrapeTotalPageData(r.Body)
		if err != nil {
			logger.Error.Println("Failed to scrape data:", err)
		}
		t = total
	})

	// Kick off the crawl of the site root.
	if err := collector.Visit("https://www.600xue.com"); err != nil {
		logger.Error.Println("Failed to visit target page:", err)
	}
	return
}

// scrapeTotalPageData extracts the total listing-page count from a page body.
//
// The pagination widget's ".last" link points at ".../page/<N>/", so the
// count is the second-to-last "/"-separated segment of that href. Returns 0
// when no pagination widget is present, and falls back to 50 when the widget
// exists but the page number cannot be parsed (historical behavior).
func scrapeTotalPageData(body []byte) (int, error) {
	doc, err := goquery.NewDocumentFromReader(bytes.NewReader(body))
	if err != nil {
		// Keep the historical fallback of 50 pages on a parse failure.
		return 50, err
	}

	total := 0
	doc.Find(".wp-pagenavi").Find(".last").Each(func(i int, s *goquery.Selection) {
		// Widget found: assume the fallback until the href parses cleanly.
		total = 50
		href, _ := s.Attr("href")
		parts := strings.Split(href, "/")
		if len(parts) < 2 {
			// Fix: previously parts[len(parts)-2] would panic on an href
			// without enough "/" segments; keep the fallback instead.
			return
		}
		pages, err := strconv.Atoi(parts[len(parts)-2])
		if err == nil {
			total = pages
			logger.Info.Println("Find Total Page:", total)
		}
	})

	return total, nil
}

// Crawler600XuePage fetches one listing page of 600xue.com (page 1 is the
// site root, page N > 1 is /page/N) and returns the articles scraped from it.
// A nil slice is returned when the request or the scrape fails.
func Crawler600XuePage(page int) (array []Xue600Page) {
	collector := getNewCollector()

	// Send browser-like headers with every request.
	collector.OnRequest(func(r *colly.Request) {
		r.Headers = getHeader()
	})

	// On a successful response, scrape the article cards.
	collector.OnResponse(func(r *colly.Response) {
		if r.StatusCode != http.StatusOK {
			logger.Info.Println("Login failed with status code:", r.StatusCode)
			return
		}
		arr, err := scrapeData(r.Body)
		if err != nil {
			logger.Error.Println("Failed to scrape data:", err)
		}
		array = arr
	})

	// Build the listing-page URL and crawl it.
	target := "https://www.600xue.com/"
	if page > 1 {
		target += "page/" + strconv.Itoa(page)
	}
	if err := collector.Visit(target); err != nil {
		logger.Error.Println("Failed to visit target page:", err)
	}
	return
}

// getHeader builds the browser-like request headers used for anonymous page
// fetches, mimicking Microsoft Edge 114 on macOS.
func getHeader() *http.Header {
	// Table-driven to keep the header set easy to scan and extend.
	pairs := [][2]string{
		{"Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7"},
		{"Accept-Encoding", "gzip, deflate, br"},
		{"Accept-Language", "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6"},
		{"Sec-Ch-Ua", `"Not.A/Brand";v="8", "Chromium";v="114", "Microsoft Edge";v="114"`},
		{"Sec-Ch-Ua-Mobile", "?0"},
		{"Sec-Ch-Ua-Platform", `"macOS"`},
		{"Sec-Fetch-Dest", "document"},
		{"Sec-Fetch-Mode", "navigate"},
		{"Sec-Fetch-Site", "none"},
		{"Sec-Fetch-User", "?1"},
		{"Upgrade-Insecure-Requests", "1"},
		{"User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.51"},
	}
	header := http.Header{}
	for _, kv := range pairs {
		header.Set(kv[0], kv[1])
	}
	return &header
}

// scrapeData parses a listing-page body and returns one Xue600Page per
// <article> element.
//
// For each article it collects: the post id (second-to-last path segment of
// the title link's href), the title text, the lazy-loaded cover image
// ("data-src"), the publication timestamp (<time datetime=...>), and the
// category map (slug -> display name) from the ".meta-category" links.
// Articles whose permalink cannot be split into an id are skipped with an
// error log instead of panicking.
func scrapeData(body []byte) ([]Xue600Page, error) {
	doc, err := goquery.NewDocumentFromReader(bytes.NewReader(body))
	if err != nil {
		return nil, err
	}

	arr := []Xue600Page{}
	doc.Find("article").Each(func(i int, s *goquery.Selection) {
		img, _ := s.Find("img").Attr("data-src")
		// Renamed from "time": the original shadowed the time package import.
		published, _ := s.Find("time").Attr("datetime")
		titleLink := s.Find(".entry-title").Find("a")
		href, _ := titleLink.Attr("href")
		title := titleLink.Text()

		categories := map[string]string{}
		s.Find(".meta-category").Find("a").Each(func(i int, a *goquery.Selection) {
			link, _ := a.Attr("href")
			parts := strings.Split(link, "/")
			if len(parts) < 2 {
				// Fix: guard against an index-out-of-range panic on a
				// malformed category link.
				return
			}
			slug := parts[len(parts)-2]
			categories[slug] = strings.TrimSpace(a.Text())
		})

		ha := strings.Split(href, "/")
		if len(ha) < 2 {
			// Fix: guard against an index-out-of-range panic on a
			// malformed article permalink.
			logger.Error.Println("unexpected article href, skipping:", href)
			return
		}
		id := ha[len(ha)-2]
		arr = append(arr, Xue600Page{
			Title:     title,
			Id:        id,
			Image:     img,
			Time:      published,
			Categorys: categories,
		})
		logger.Info.Println("Title:", title)
		logger.Info.Println("Link:", href)
		logger.Info.Println("Image:", img)
		logger.Info.Println("Time:", published)
	})
	return arr, nil
}

// Crawler600XueContentPrice fetches the detail page for the given post id
// and scrapes its content, pricing tiers, sales figure and password.
// When loginCookie is non-nil, the session cookies are attached to the
// request; otherwise anonymous browser-like headers are used.
func Crawler600XueContentPrice(loginCookie []string, id uint) (d Xue600Detail, err error) {
	collector := getNewCollector()

	// Either impersonate a browser or reuse the login session.
	collector.OnRequest(func(r *colly.Request) {
		if loginCookie == nil {
			r.Headers = getHeader()
			return
		}
		for _, cookie := range loginCookie {
			r.Headers.Add("Cookie", cookie)
		}
	})

	// Scrape the detail data from a successful response.
	collector.OnResponse(func(r *colly.Response) {
		if r.StatusCode != http.StatusOK {
			logger.Info.Println("Crawler600XueDetail failed with status code:", r.StatusCode)
			return
		}
		detail, scrapeErr := scrapeContentPriceData(r.Body)
		if scrapeErr != nil {
			logger.Error.Println("Crawler600XueDetail Failed to scrape data:", scrapeErr)
		}
		d = detail
		d.Id = strconv.Itoa(int(id))
	})

	// Crawl https://www.600xue.com/<id>/.
	err = collector.Visit("https://www.600xue.com/" + strconv.FormatUint(uint64(id), 10) + "/")
	if err != nil {
		logger.Info.Println("Crawler600XueDetail Failed to visit target page:", err)
	}
	return
}

// scrapeContentPriceData parses a product detail page into a Xue600Detail.
//
// It extracts: the rendered HTML of ".entry-content"; the three
// ".pricing__opt" texts in their fixed on-page order (regular / VIP /
// all-VIP — assumed from struct field names, TODO confirm); the sales figure
// (second <span> of the second <li> inside ".list-paybody"); and the
// extraction password from "#refurl". The caller is expected to fill in Id.
func scrapeContentPriceData(body []byte) (d Xue600Detail, err error) {
	doc, err := goquery.NewDocumentFromReader(bytes.NewReader(body))
	if err != nil {
		return
	}

	content, err := doc.Find(".entry-content").Html()
	if err != nil {
		// Fix: previously this error was silently carried while scraping
		// continued with partial data; fail fast instead.
		return
	}
	d.Content = content

	// The three pricing options appear in a fixed order on the page.
	doc.Find(".pricing__opt").Each(func(i int, s *goquery.Selection) {
		switch i {
		case 0:
			d.Common = s.Text()
		case 1:
			d.Vip = s.Text()
		case 2:
			d.VipAll = s.Text()
		}
	})

	// Sales figure: second <span> inside the second <li> of ".list-paybody".
	doc.Find(".list-paybody").Find("li").Each(func(i int, s *goquery.Selection) {
		if i != 1 {
			return
		}
		s.Find("span").Each(func(j int, span *goquery.Selection) {
			if j == 1 {
				d.Sold = span.Text()
			}
		})
	})

	// Extraction password, when the page provides one.
	doc.Find("#refurl").Each(func(i int, s *goquery.Selection) {
		d.Pwd = s.Text()
	})
	if d.Pwd == "" {
		// Dump the whole page to the log to help debug a missing password.
		h, _ := doc.Html()
		logger.Info.Println("html:", h)
	}
	return
}

// Login authenticates against the site's WordPress AJAX endpoint and returns
// the raw Set-Cookie header values of the response; callers attach them as
// "Cookie" headers on subsequent requests to reuse the session.
//
// SECURITY NOTE(review): account credentials are hard-coded below and also
// appear in comments; they should be moved to configuration or environment
// variables and rotated.
func Login() (loginCookie []string, err error) {
	c := getNewCollector()

	c.UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"
	//extensions.RandomUserAgent(c)

	// Capture the session cookies in the OnResponse callback.
	c.OnResponse(func(r *colly.Response) {
		// All Set-Cookie values from the response headers.
		v := r.Headers.Values("Set-Cookie")
		fmt.Println(v)
		// Keep the cookie values for the caller.
		loginCookie = v
	})

	// Login form fields expected by the endpoint:
	// action: user_login
	// username: daming924@qq.com
	// password: ydaming87924
	// 171104587@qq.com Abc123!!!
	loginData := map[string]string{}
	loginData["action"] = "user_login"
	//loginData["username"] = "171104587@qq.com"
	//loginData["password"] = "Abc123!!!"
	loginData["username"] = "daming924@qq.com"
	loginData["password"] = "ydaming87924"

	logger.Info.Println("start post login")
	// Submit the login form.
	err = c.Post("https://www.600xue.com/wp-admin/admin-ajax.php", loginData)
	if err != nil {
		logger.Info.Println("Failed to submit login form:", err)
	}
	return
}

// Crawler600XueDownload resolves the real download link for the given post
// id via the site's /go?post_id= redirect endpoint, sending the session
// cookies obtained from Login.
func Crawler600XueDownload(loginCookie []string, id uint) (d Xue600Download, err error) {
	logger.Info.Println("ready to download product: ", id)
	collector := getNewCollector()

	collector.UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"

	// Attach the login session cookies to every request.
	collector.OnRequest(func(r *colly.Request) {
		for _, cookie := range loginCookie {
			r.Headers.Add("Cookie", cookie)
		}
	})

	// Extract the download URL from a successful response.
	collector.OnResponse(func(r *colly.Response) {
		if r.StatusCode != http.StatusOK {
			logger.Info.Println("Crawler600XueDownload failed with status code:", r.StatusCode)
			return
		}
		result, scrapeErr := scrapeDownloadData(r.Body)
		d = result
		if scrapeErr != nil {
			logger.Error.Println(strconv.FormatUint(uint64(id), 10)+" Crawler600XueDownload Failed to scrape data:", scrapeErr)
			return
		}
		d.Id = fmt.Sprint(id)
	})

	// Crawl the redirect endpoint for this post.
	err = collector.Visit("https://www.600xue.com/go?post_id=" + strconv.FormatUint(uint64(id), 10))
	if err != nil {
		logger.Info.Println("Crawler600XueDownload Failed to visit target page:", err)
	}
	return
}

// downloadURLRe matches the client-side redirect emitted by the download
// endpoint: window.location='<url>' (single or double quotes). Compiled once
// at package scope instead of on every call (fix: was recompiled per call).
var downloadURLRe = regexp.MustCompile(`window\.location=['"]([^'"]+)`)

// scrapeDownloadData extracts the real download URL from the redirect page
// returned by the /go?post_id= endpoint.
//
// When no redirect is found, an error is returned and the whole page is
// logged; if the page says the download quota was exceeded
// ("下载次数超出限制"), Url is set to the sentinel "0" so the caller can
// distinguish that case.
func scrapeDownloadData(body []byte) (d Xue600Download, err error) {
	doc, err := goquery.NewDocumentFromReader(bytes.NewReader(body))
	if err != nil {
		return
	}

	h, err := doc.Html()
	if err != nil {
		return
	}

	match := downloadURLRe.FindStringSubmatch(h)
	if len(match) != 2 {
		err = errors.New("Failed to parse download url")
		fmt.Println(h)
		logger.Info.Println("html: " + h)
		// "下载次数超出限制" = "download count exceeded the limit".
		if strings.Contains(h, "下载次数超出限制") {
			d.Url = "0"
		}
		return
	}
	d.Url = match[1]
	logger.Info.Println("url: " + d.Url)
	return
}

// getNewCollector returns a colly collector configured with a custom
// transport: environment-based proxy support, pooled keep-alive connections,
// and deliberately generous dial/TLS/request timeouts to tolerate very slow
// pages and downloads.
func getNewCollector() *colly.Collector {
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   600 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		MaxIdleConns:          100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   600 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}

	collector := colly.NewCollector()
	collector.WithTransport(transport)
	collector.SetRequestTimeout(3600 * time.Second)
	return collector
}
