package main

import (
	"bboying_spider/cos"
	"bboying_spider/data"
	"bboying_spider/db"
	"bboying_spider/utils"
	"encoding/json"
	"fmt"
	browser "github.com/EDDYCJY/fake-useragent"
	"github.com/gocolly/colly"
	"log"
	"net/http"
	"os"
	"regexp"
	"strings"
	"time"
)

// main loads every previously crawled dancer record from the DB, then visits
// each dancer's detail page and persists the scraped details.
func main() {

	bboySetKey := utils.BuildSpiderBboysSetKey()
	// Switch to the bgirls set key when crawling bgirls instead:
	//bboySetKey := utils.BuildSpiderBgirlsSetKey()
	bboyKeys, err := db.Instance().ZRange(bboySetKey, 0, -1)
	if err != nil {
		fmt.Println("query keys error = " + err.Error())
		return
	}

	// Load and decode every bboy/bgirl record. Entries that are missing or
	// fail to parse are skipped rather than aborting the whole run.
	dancers := make([]data.Dancer, 0, len(bboyKeys))
	for _, bboyKey := range bboyKeys {
		dancerStr, err := db.Instance().GetValue(bboyKey)
		if err != nil {
			continue
		}
		dancer := data.Dancer{}
		if err = json.Unmarshal([]byte(dancerStr), &dancer); err != nil {
			continue
		}
		dancers = append(dancers, dancer)
	}

	for i, dancer := range dancers {
		// Detail page URL and a stable id derived from the dancer + publish time.
		detailsUrl := dancer.DetailUrl
		detailsId := utils.BuildDancerDetailsId(dancer, dancer.PublishTime)

		// BUG FIX: the progress line had no trailing newline, so every
		// iteration's output ran together on a single console line; it also
		// printed a 0-based index, never reaching N/N on the last task.
		fmt.Printf("任务进行中：%d/%d\n", i+1, len(dancers))

		// The id is used as a file name below, so strip illegal characters.
		detailsId = sanitizeFileName(detailsId)

		spiderDetailsPage(dancer, detailsUrl, detailsId)
	}
}

// imgSrcPattern extracts the first absolute image URL (jpg/png/webp) from a
// srcset-style attribute value. Compiled once at package init instead of on
// every <img> element.
var imgSrcPattern = regexp.MustCompile(`https:\/\/[^\s,]+\.(jpg|png|webp)`)

// mirrorImage downloads the image at srcUrl and re-uploads it to COS under a
// key derived from detailsId and index, preserving the original extension.
// On any failure the original source URL is returned so the details record
// still points at a usable image.
func mirrorImage(srcUrl string, detailsId string, index int) string {
	resp, err := http.Get(srcUrl)
	if err != nil {
		fmt.Println("Err 下载图片失败:" + srcUrl)
		return srcUrl
	}
	defer resp.Body.Close()
	// BUG FIX: the original never checked the status code, so 404/5xx HTML
	// bodies could be uploaded to COS as "images".
	if resp.StatusCode != http.StatusOK {
		fmt.Println("Err 下载图片失败:" + srcUrl)
		return srcUrl
	}
	fmt.Println("下载图片开始")
	// imgSrcPattern guarantees srcUrl ends in .jpg/.png/.webp, so the last
	// dot always exists and carries the extension.
	ext := srcUrl[strings.LastIndex(srcUrl, "."):]
	key := fmt.Sprintf("spider/%s-%d%s", detailsId, index, ext)
	downloadUrl, err := cos.Instance().PutFile(key, resp.Body)
	if err != nil {
		return srcUrl
	}
	return downloadUrl
}

// spiderDetailsPage crawls one dancer detail page: it mirrors every unique
// image to COS, writes the page HTML to details/<detailsId>.html, and saves
// both the DancerDetails record and the updated Dancer record to the DB.
func spiderDetailsPage(dancer data.Dancer, detailsUrl string, detailsId string) {
	c := colly.NewCollector(
		colly.AllowURLRevisit(),
	)

	// Rotate the User-Agent on every request to reduce the chance of blocking.
	c.OnRequest(func(r *colly.Request) {
		r.Headers.Set("User-Agent", browser.Random())
	})

	c.OnHTML("main.site-main", func(main *colly.HTMLElement) {
		htmlStr, err := main.DOM.Html()
		if err != nil {
			fmt.Println(err)
			return
		}
		dancerDetails := data.DancerDetails{
			Id:       detailsId,
			Nickname: dancer.Nickname,
			ImgUrl:   dancer.ImgUrl,
		}
		imgUrls := make([]string, 0)
		// Source URLs already processed, used to de-duplicate repeated <img> tags.
		imgTempUrls := make([]string, 0)
		main.ForEach("*", func(i int, e *colly.HTMLElement) {
			switch e.Name {
			case "time":
				if e.Attr("class") == "updated" {
					// Fall back to "now" when the timestamp cannot be parsed.
					// (The original also had a dead `updatedTime := time.Now()`
					// that was immediately shadowed by `:=` — removed.)
					const timeFormat = "2006-01-02T15:04:05+00:00"
					updatedTime, parseErr := time.Parse(timeFormat, e.Attr("datetime"))
					if parseErr != nil {
						updatedTime = time.Now()
					}
					dancerDetails.UpdateTime = updatedTime.UnixMilli()
				}
			case "img":
				imgText := e.DOM.AttrOr("data-lazy-srcset", "")
				if imgText == "" {
					return
				}
				match := imgSrcPattern.FindString(imgText)
				if match == "" || strSliceContains(imgTempUrls, match) {
					return
				}
				imgTempUrls = append(imgTempUrls, match)
				// BUG FIX: the original declared imgIndex but never
				// incremented it, so every image of a page was uploaded to
				// the SAME COS key, each overwriting the previous one.
				// len(imgUrls) is the next zero-based index.
				imgUrls = append(imgUrls, mirrorImage(match, detailsId, len(imgUrls)))
			}
		})
		dancerDetails.ImgUrls = imgUrls

		// Persist the raw page HTML next to the structured record.
		filePath := fmt.Sprintf("details/%s.html", detailsId)
		file, err := os.Create(filePath)
		if err != nil {
			log.Fatalf("Failed to create or open file: %v", err)
		}
		defer file.Close()

		if _, err = file.WriteString(htmlStr); err != nil {
			log.Fatalf("Failed to write to file: %v", err)
		}
		log.Println("HTML content has been written to", filePath)

		dancerDetails.HtmlFilePath = filePath
		dancer.DetailId = detailsId

		// BUG FIX: the original overwrote the first Marshal error with the
		// second, so a failed DancerDetails encoding went undetected.
		detailsBytes, err := json.Marshal(dancerDetails)
		if err != nil {
			fmt.Printf("Error parse Json %s \n", err)
			return
		}
		dancerBytes, err := json.Marshal(dancer)
		if err != nil {
			fmt.Printf("Error parse Json %s \n", err)
			return
		}

		// Save both records atomically via a pipeline; only the Exec result
		// is checked (SetValue errors surface through Exec).
		db.Instance().StartPipeline()
		_ = db.Instance().SetValue(dancerDetails.Id, string(detailsBytes))
		_ = db.Instance().SetValue(dancer.Id, string(dancerBytes))
		if err = db.Instance().Exec(); err != nil {
			fmt.Printf("保存失败%s \n", err)
		} else {
			fmt.Println("保存成功：", dancerDetails)
		}
	})
	fmt.Println("访问：" + detailsUrl)
	c.Visit(detailsUrl)
}

// strSliceContains reports whether e is present in s.
// A nil or empty slice always yields false.
func strSliceContains(s []string, e string) bool {
	for idx := range s {
		if s[idx] == e {
			return true
		}
	}
	return false
}

// fileNameReplacer maps characters that are illegal in file names on common
// file systems to visually similar safe replacements. Built once at package
// init so sanitizeFileName does not rebuild the replacer on every call
// (the original allocated a new strings.Replacer per invocation).
var fileNameReplacer = strings.NewReplacer(
	"/", "_", // replace / with _
	"\\", "-", // replace \ with -
	":", ";", // replace : with ;
	"*", "x", // replace * with x
	"?", ".", // replace ? with .
	`"`, `'`, // replace " with '
	"<", "(", // replace < with (
	">", ")", // replace > with )
	"|", "+", // replace | with +
)

// sanitizeFileName returns filename with characters that are illegal in file
// names replaced, so the result is safe to use as a file name.
func sanitizeFileName(filename string) string {
	return fileNameReplacer.Replace(filename)
}
