package util

import (
	"context"
	"fmt"
	"math/rand"
	"os"
	"slices"
	"strconv"
	"sync"
	"time"

	//"fmt"
	"go-chromedp-client/structs"
	"net/url"
	neturl "net/url"
	"strings"

	"github.com/PuerkitoBio/goquery"
)

// BookUrlContent downloads one chapter page and extracts its text using the
// configured goquery selectors. When the page contains pagination markers
// ("下一页" etc.) it recursively follows the next sub-page link and appends
// that content.
//
// Parameters:
//   curl     - chapter URL, may be relative (resolved via BookFullUrl)
//   ua       - "mobile" selects a mobile user agent, anything else keeps the default
//   tags_arr - selectors, each optionally "selector@attr" to read an attribute
//   subpage  - anchor text marking the next sub-page; "" enables auto-detection
//   sublink  - "nextall" allows following a link that is a sub-page of curl
//   url_info - parsed site URL used to absolutize root-relative links
//   url_post - base URL used to absolutize bare file names
//   mode     - "html" extracts inner HTML, anything else extracts plain text
//   link_res - optional list of known links for de-duplication (may be nil)
//   ctx      - cancellation context; a 20-minute fallback is created when nil
//
// Returns the extracted text. On a transport error the error text is returned
// both as the string and as the error; a persistently short page is returned
// as-is with a nil error.
func BookUrlContent(curl string, ua string, tags_arr []string, subpage string, sublink string, url_info *url.URL, url_post string, mode string, link_res *[]string, ctx context.Context) (string, error) {
	curl = BookFullUrl(curl, url_info, url_post)
	op := &structs.Options{}
	fmt.Println(curl)
	if ua == "mobile" {
		op.Ua = "Mozilla/5.0 (Linux; Android 13; SM-G981B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Mobile Safari/537.36"
	}

	tt, err := Get(curl, nil, *op)
	if err != nil {
		return err.Error(), err
	}
	// BUGFIX: the original unconditionally replaced the caller's ctx with a
	// fresh 20-minute context here, so cancellation from BookMutiDownload
	// never reached the recursive calls. Only create a fallback when no
	// context was supplied.
	if ctx == nil {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(context.Background(), 1200*time.Second)
		defer cancel()
	}
	// A very short body usually means an anti-bot / error page: retry up to
	// 3 times with a small randomized delay.
	if len(tt) < 500 {
		for trynum := 0; trynum < 3; trynum++ {
			tt, err = Get(curl, nil, *op)
			if len(tt) > 500 {
				break
			} else {
				time.Sleep(time.Duration(rand.Int63n(2)+1) * time.Second)
			}
		}
	}
	if len(tt) < 500 {
		// Still short after retries: return what we got, not an error.
		return tt, nil
	}
	// Many chapter sites serve GBK; normalize to UTF-8 before parsing.
	if !IsUTF8([]byte(tt)) {
		bt, _ := GbkToUTF8([]byte(tt))
		tt = string(bt)
	}

	st := ""
	// Turn hard line breaks into CRLF before goquery strips tags.
	tt = strings.ReplaceAll(tt, "<br>", "\r\n")
	tt = strings.ReplaceAll(tt, "<br/>", "\r\n")
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(tt))
	if err != nil {
		return err.Error(), err
	}
	for kk, tag := range tags_arr {
		// Enough content already collected; skip later selectors to avoid
		// mixing unrelated page parts into the chapter.
		if len(st) > 500 {
			break
		}
		// "selector@attr" reads an attribute instead of text/HTML.
		tag_group := strings.Split(tag, "@")
		doc.Find(tag_group[0]).Each(
			func(i int, sel *goquery.Selection) {
				rhtml := ""
				if len(tag_group) < 2 {
					if mode == "html" {
						rhtml, _ = sel.Html()
					} else {
						rhtml = sel.Text()
					}
				} else {
					rhtml, _ = sel.Attr(tag_group[1])
				}

				rhtml = strings.ReplaceAll(rhtml, "<br>", "\r\n")
				rhtml = strings.ReplaceAll(rhtml, "<br/>", "\r\n")
				// Full-width paragraph indent becomes a line break.
				rhtml = strings.ReplaceAll(rhtml, "　　", "\r\n")
				// The first selector is treated as the chapter title.
				if kk == 0 {
					rhtml = "-- " + rhtml + " --"
				}
				st += rhtml + "\r\n"

			})
	}
	// Selectors produced (almost) nothing: fall back to scanning all divs
	// and keep the first sufficiently long one.
	if len(st) < 200 {
		doc.Find("div").Each(
			func(i int, sel *goquery.Selection) {
				rhtml := ""
				if mode == "html" {
					rhtml, _ = sel.Html()
				} else {
					rhtml = sel.Text()
				}
				if len(rhtml) < 200 || len(st) > 200 {
					return
				}
				st += rhtml + "\r\n"
			})

	}
	// Pagination auto-detection: if the text says the chapter continues,
	// pick the most specific "next" marker present on the page.
	if subpage == "" && (strings.Contains(st, "本章未完") || strings.Contains(st, "本页未完") || strings.Contains(st, "本节未完") || strings.Contains(st, "下一页")) {
		if strings.Contains(st, "下一节") {
			subpage = "下一节"
		}
		if strings.Contains(st, "下一页") {
			subpage = "下一页"
		}
		if strings.Contains(st, "下一章") {
			subpage = "下一章"
		}

	}
	if subpage != "" {
		nexturl := BookNextUrl(doc, subpage, curl)
		if nexturl == "" {
			return st, nil
		}
		// A "next" link that is merely a sub-page of the current URL is only
		// followed when sublink == "nextall".
		isSubpage := UrlCompare(curl, nexturl)
		if isSubpage && sublink != "nextall" {
			return st, nil
		}
		repeatUrl := false
		if link_res != nil {
			for _, link := range *link_res {
				if strings.Contains(link, nexturl) || (strings.Contains(nexturl, link) && nexturl != link) {
					repeatUrl = true
					*link_res = append(*link_res, nexturl)
					break
				}
			}
		}
		raw_link := nexturl
		nexturl = BookFullUrl(nexturl, url_info, url_post)
		if nexturl == curl {
			repeatUrl = true
			// BUGFIX: guard the dereference — link_res is optional and the
			// original would panic here when it was nil.
			if link_res != nil {
				*link_res = append(*link_res, raw_link)
			}
		}
		if !repeatUrl {
			// Stop recursing once the caller's deadline has passed.
			select {
			case <-ctx.Done():
				return st, nil
			default:
			}

			nhref, err := neturl.QueryUnescape(nexturl)
			if err != nil {
				fmt.Println("book urldecode 304", err.Error())
				nhref = nexturl
			}
			nexturl = nhref
			if link_res != nil && slices.Contains(*link_res, nexturl) {
				return st, nil
			}
			fmt.Println("nexturl", nexturl)

			res, err := BookUrlContent(nexturl, ua, tags_arr, subpage, sublink, url_info, url_post, mode, link_res, ctx)
			if err == nil {
				st += res
			}
		}

	}
	return st, nil
}

// BookFullUrl resolves a possibly-relative chapter URL into an absolute one.
//
// Rules (matching the original behavior):
//   - curl already containing "//" is treated as absolute and returned as-is;
//   - a bare file name (no "/") with a non-empty url_post is appended to that
//     base, inserting a "/" separator when needed;
//   - otherwise a root-relative path is rebuilt from url_info's scheme/host.
//     NOTE(review): url_info is dereferenced on this path — callers appear to
//     always supply it; confirm before passing nil with a slash-containing curl.
func BookFullUrl(curl string, url_info *url.URL, url_post string) string {
	// Absolute (or protocol-relative) URL: nothing to do.
	if strings.Contains(curl, "//") {
		return curl
	}
	if url_post != "" && !strings.Contains(curl, "/") {
		// Bare file name: join onto the configured base URL.
		if strings.HasSuffix(url_post, "/") {
			return url_post + curl
		}
		return url_post + "/" + curl
	}
	// Root-relative path: rebuild from the scheme/host of the start page.
	u := &url.URL{
		Scheme: url_info.Scheme,
		Host:   url_info.Host,
		Path:   curl,
	}
	return u.String()
}

// BookNextUrl scans every anchor in doc and returns the href of the last one
// whose link text contains subpage (e.g. "下一页"). It returns "" when subpage
// is empty or no anchor matches. The href is returned verbatim (possibly
// relative); curl is accepted for interface stability but not used here.
//
// BUGFIX: removed leftover debug logging that printed every anchor whose text
// contained "html" to stdout.
func BookNextUrl(doc *goquery.Document, subpage string, curl string) string {
	if subpage == "" {
		return ""
	}
	res := ""
	doc.Find("a").Each(
		func(i int, sel *goquery.Selection) {
			href, ok := sel.Attr("href")
			if !ok {
				return
			}
			// Last matching anchor wins — goquery's Each cannot stop early,
			// and "next" links typically sit at the bottom of the page.
			if strings.Contains(sel.Text(), subpage) {
				res = href
			}
		})
	return res
}

// BookMutiDownload downloads every chapter URL in link_res concurrently (at
// most maxProcces fetches in flight) and appends the results, in chapter
// order, to ROOT_PATH/web/output/<name>.txt. An empty name gets a random one.
//
// Returns (root_path, filename, status) where status is "ok" on success,
// "超时" when the overall 20-minute deadline expires, or an error/chapter
// failure description.
func BookMutiDownload(maxProcces int, link_res []string, url_post string, name string, tags_arr []string, subpage string, sublink string, ua string) (string, string, string) {
	url_info, err := neturl.Parse(url_post)
	if err != nil {
		return "", "", err.Error()
	}
	// Overall deadline for the whole book.
	ctx, ctxf := context.WithTimeout(context.Background(), 1200*time.Second)
	defer ctxf()
	// chFinish is a counting semaphore bounding concurrent fetches: the
	// producer sends one token per spawned worker, each worker releases its
	// token when it exits.
	chFinish := make(chan int, maxProcces)
	// NOTE(review): now_chap is written by the writer loop below and read by
	// the producer goroutine without synchronization — pre-existing data race,
	// left as-is to keep this fix minimal.
	now_chap := 0
	sleep_time := 0
	map_res := &sync.Map{}
	go func() {
		for k, url := range link_res {
			// Throttle: don't fetch too far ahead of the chapter currently
			// being written out.
			for {
				if k-now_chap < 2*maxProcces {
					sleep_time = 0
					break
				}
				time.Sleep(2 * time.Second)
				sleep_time += 2
				if sleep_time > 60 {
					// Writer looks stuck: record a timeout marker so it can advance.
					map_res.Store(now_chap+1, strconv.Itoa(now_chap+1)+"超时")
					sleep_time = 0
					break
				}
			}
			url := url // per-iteration copies for the closure (pre-Go 1.22 semantics)
			tg := k

			go func() {
				// BUGFIX: release the semaphore slot on every exit path. The
				// original skipped <-chFinish on the error return, leaking a
				// slot and eventually deadlocking the producer loop.
				defer func() { <-chFinish }()

				st, err := BookUrlContent(url, ua, tags_arr, subpage, sublink, url_info, url_post, "", &link_res, ctx)
				if err != nil {
					map_res.Store(tg, "cyberErrorExit:"+strconv.Itoa(tg)+":"+err.Error()+"\r\n")
					return
				}
				map_res.Store(tg, st)
				fmt.Println(tg, "已完成", url)
			}()
			chFinish <- 1
		}
	}()
	if name == "" {
		name = RandStringRunes(10)
	}
	dp := string(os.PathSeparator)
	root_path := os.Getenv("ROOT_PATH") + dp + "web" + dp
	filename := "output" + dp + name + ".txt"
	out_path := root_path + filename
	// Overwrite the output file with a small header, then append chapters.
	FilePutContent(out_path, GetDate()+"\r\n"+url_post+"\r\n"+sublink+"\r\n", false)
	// Drain results strictly in chapter order so the file stays ordered.
	for i := 0; i < len(link_res); i += 1 {
		var str interface{}
		for {
			var ok bool
			str, ok = map_res.LoadAndDelete(i)
			if ok {
				break
			}
			time.Sleep(1 * time.Second)
			select {
			case <-ctx.Done():
				// Overall deadline reached while waiting for a chapter.
				return "", "", "超时"
			default:
				// Keep waiting for this chapter's result.
			}
		}

		now_chap = i
		fmt.Println("写入章节:", i)
		FilePutContent(out_path, str.(string), true)
		// A worker recorded a fatal error; stop and surface it.
		if strings.Contains(str.(string), "cyberErrorExit") {
			return root_path, filename, str.(string)
		}
	}
	return root_path, filename, "ok"
}

// BookOneDownload fetches every chapter in link_res one after another and
// appends each result to ROOT_PATH/web/output/<name>.txt.
//
// Returns (root_path, filename, status) where status is "ok" on success or
// the text of the first error encountered (which is also written to the
// output file, prefixed with the chapter index).
func BookOneDownload(maxProcces int, link_res []string, url_post string, name string, tags_arr []string, subpage string, sublink string, ua string) (string, string, string) {
	url_info, parseErr := neturl.Parse(url_post)
	ctx, cancel := context.WithTimeout(context.Background(), 1200*time.Second)
	defer cancel()
	if parseErr != nil {

		return "", "", parseErr.Error()
	}
	sep := string(os.PathSeparator)
	rootPath := os.Getenv("ROOT_PATH") + sep + "web" + sep
	outName := "output" + sep + name + ".txt"
	target := rootPath + outName
	// Overwrite the file with a small header before appending chapters.
	FilePutContent(target, GetDate()+"\r\n"+url_post+"\r\n"+strings.Join(tags_arr, ",")+"\r\n"+sublink, false)

	for idx, link := range link_res {
		content, dlErr := BookUrlContent(link, ua, tags_arr, subpage, sublink, url_info, url_post, "", &link_res, ctx)
		if dlErr != nil {
			fmt.Println(dlErr)

			FilePutContent(target, strconv.Itoa(idx)+":"+dlErr.Error()+"\r\n", true)
			return rootPath, outName, dlErr.Error()
		}
		FilePutContent(target, content, true)
	}

	return rootPath, outName, "ok"
}
