package main

import (
	"fmt"
	"net/http"
	"net/url"
	"os"
	"os/signal"
	"strings"
	"sync"
	"time"

	. "github.com/soekchl/myUtils"
)

// https://www.dumanhhua.com/

// Dumanhhua scrapes comic chapter lists and images from www.dumanhhua.com.
// NOTE(review): the li/href/span marker pairs are never initialized in this
// file — presumably populated by the caller before start() runs; verify.
type Dumanhhua struct {
	li   [2]string // start/stop markers delimiting one <li> element (consumed by getString)
	href [2]string // start/stop markers for the link inside a li
	span [2]string // start/stop markers for the title text inside a li

	errDown map[string]string // NOTE(review): appears unused here — the package-level errDown is what getOne writes to; confirm

	body     string            // raw HTML of the comic home page (saved by getReqList)
	haveNum  bool              // chapter titles carry consecutive numbers
	ifUseNum bool              // whether to use the chapter-number comparison (copied from info.IfUseNum)
	missPage map[int][2]string // missing chapters: chapter number -> [title, download href]

	maxCount int // NOTE(review): not referenced anywhere in this file
	okCount  int // NOTE(review): not referenced anywhere in this file
}

// Marker pair used by getImages to cut image URLs out of the chapter HTML:
// each image appears as src="..." inside the pagination-image section.
const (
	dumanStartKey = `src="`
	dumanStopKey  = `"`
)

// start dispatches on the configured output mode:
//   - qiman hosts or *outPutMod >= 3: fall back to the generic scraper.
//   - *outPutMod == 1 or 2: scrape the chapter list, optionally fill
//     numbering gaps, and emit the request list as JSON.
//   - otherwise: download every chapter.
func (d *Dumanhhua) start() {
	// strings.Contains is the idiomatic form of strings.Index(...) >= 0.
	if strings.Contains(info.ComicHome, "qiman") || *outPutMod >= 3 { // 使用qiman
		Warn("使用常规模式")
		normal()
		return
	}

	if *outPutMod == 1 || *outPutMod == 2 {
		Notice("读取json只生成reqList")
		d.ifUseNum = info.IfUseNum
		d.getReqList()

		// When titles carry chapter numbers and gaps were detected,
		// re-scan the saved page body to fill in the missing chapters.
		if d.haveNum && d.ifUseNum {
			Noticef("有数字并且有缺少 len=%v list=%v", len(d.missPage), d.missPage)
			time.Sleep(time.Second)
			d.paddReqList()
		}

		outPutJson()
		Notice("已生成json")
		return
	}

	d.downloadList()
}

// downloadList downloads every chapter in info.ReqList concurrently, then
// retries failures via checkErrorDown. info.ReqList alternates
// [title, url, title, url, ...]; only odd indexes (URLs) are downloaded.
// On Ctrl-C it prints which chapters are still unfinished and exits.
func (g *Dumanhhua) downloadList() {
	Notice("开始下载漫画")

	c := make(chan os.Signal, 1)
	// os.Kill (SIGKILL) can never be delivered to a handler, so listing it
	// in signal.Notify was a no-op (go vet reports this); trap Interrupt only.
	signal.Notify(c, os.Interrupt)

	go func() {
		<-c
		errDownMutex.Lock()
		// Was len(errDown) > 1, which silently hid a single failed chapter.
		if len(errDown) > 0 {
			fmt.Printf("发生异常的数据：%v", errDown)
		}
		errDownMutex.Unlock()

		// ReqList holds title/url pairs, so the chapter count is len/2;
		// iterating len(ReqList) chapter numbers over-counted the summary.
		str := ""
		m := 0
		for i := info.StartNum; i < info.StartNum+len(info.ReqList)/2; i++ {
			okDownMutex.Lock()
			if !okDown[i] {
				str += fmt.Sprint(i, ",")
				m++
			}
			okDownMutex.Unlock()
		}
		Noticef("总共：%v 未完成=%v 列表=%v", len(info.ReqList)/2, m, str)
		os.Exit(0)
	}()

	for n, v := range info.ReqList {
		if n&1 == 0 { // even index = chapter title, not a URL
			continue
		}
		k := n / 2 // chapter offset from info.StartNum
		if len(v) < 10 { // too short to be a real URL — mark done and skip
			okDownMutex.Lock()
			okDown[info.StartNum+k] = true
			okDownMutex.Unlock()
			Notice(info.StartNum+k, " 不下载跳过")
			continue
		}
		wg.Add(1)
		go func(idx int, u string) {
			defer wg.Done()
			// Retry each chapter up to 10 times until getOne reports success.
			for i := 0; i < 10 && !g.getOne(idx, u); i++ {
			}
		}(k+info.StartNum, info.HeaderUrl+v)
	}
	wg.Wait()

	checkErrorDown()
}

// getOne downloads every image of one chapter into ./comic/<m>/.
// m is the chapter number and urlReq the chapter page URL. It returns
// true on success; false asks the caller to retry (HTTP error, non-200
// status, or mkdir failure). Individual image failures are recorded in
// the package-level errDown map for checkErrorDown to retry later.
func (g *Dumanhhua) getOne(m int, urlReq string) (errFlag bool) {
	defer func() {
		// okDown is read under okDownMutex elsewhere; writing it unlocked
		// here was a data race.
		okDownMutex.Lock()
		okDown[m] = true
		okDownMutex.Unlock()
		Notice("\t\t\t", m, " over")
	}()

	headers := make(map[string]string)
	headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3"
	headers["Cookie"] = "UM_distinctid=16be3fa014c951-0d0f815ec8a01a-1a201708-1fa400-16be3fa014dbfe; CNZZDATA1273814033=1260435515-1562896174-https%253A%252F%252Fwww.baidu.com%252F%7C1562896174"
	headers["User-Agent"] = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"
	code, body, err := httpReq(urlReq, "get", headers, make(url.Values)) // 请求主网址内容
	if err != nil {
		Error(err)
		return
	}
	if code != 200 {
		// Truncate huge bodies so the error log stays readable.
		if len(body) > 1024 {
			Errorf("code=%v url=%v body=%s", code, urlReq, body[:1024])
		} else {
			Errorf("code=%v url=%v body=%s", code, urlReq, body)
		}
		return
	}

	list := g.getImages(body) // 从主网址中分离目标图表
	Noticef("漫画=%v 个数=%v", m, len(list))
	dir := fmt.Sprint("./comic/", m, "/")
	err = os.MkdirAll(dir, os.ModePerm)
	if err != nil {
		Error(err)
		return
	}

	var imgWg sync.WaitGroup
	for n, v := range list { // download each image concurrently
		imgWg.Add(1)
		go func(n int, v string) {
			defer imgWg.Done()
			fileName := fmt.Sprintf("%vIMGE_%v.jpeg", dir, n)
			// Goroutine-local err: the original assigned the shared outer
			// err from every goroutine, which is a data race.
			if _, err := downloadImages(v, fileName); err != nil {
				Errorf("m=%v n=%v err=%v", m, n, err)
				errDownMutex.Lock()
				// Record the same .jpeg name that was used for the download
				// (previously recorded as .jpg, so retries wrote elsewhere).
				errDown[m] = append(errDown[m], tempStruct{
					url:      v,
					fileName: fileName,
				})
				errDownMutex.Unlock()
			}
		}(n+1, v) // image numbering starts at 1, matching the original counter
	}
	imgWg.Wait()
	return true
}

// getImages extracts all image URLs from a chapter's HTML body.
// It narrows the body to the pagination-image section, then repeatedly
// cuts out src="..." values, keeping only http(s) URLs that are not
// JavaScript files.
func (g *Dumanhhua) getImages(body string) (imgList []string) {

	body, _ = getString(body, [2]string{"pagination-image", "章节内容加载中"})

	for {
		str, stopIndex := getString(body, [2]string{dumanStartKey, dumanStopKey})
		if len(str) < 1 {
			break
		}
		// HasPrefix/HasSuffix are safe on short strings; the original
		// str[:4] / str[len(str)-3:] panicked when str was 1-3 bytes long.
		if strings.HasPrefix(str, "http") && !strings.HasSuffix(str, ".js") {
			imgList = append(imgList, str)
		}
		body = body[stopIndex:] // advance past this src attribute
	}

	return
}

// paddReqList re-scans the saved home page (d.body) to resolve the chapters
// recorded as missing in d.missPage, then rebuilds info.ReqList with each
// resolved chapter spliced in right after its predecessor.
func (d *Dumanhhua) paddReqList() {
	body := d.body
	for {
		// Cut out one whole <li> element using the configured markers.
		liStr, stopIndex := getString(body, d.li)
		if stopIndex < 0 { // no more li elements
			break
		}
		hrefStr, _ := getString(liStr, d.href)
		spanStr, _ := getString(liStr, d.span)
		// Only entries with both an href and a span (title) are usable.
		if len(hrefStr) < 1 || len(spanStr) < 1 {
			body = body[stopIndex:] // advance past this li
			continue
		}

		num := getNumberToStr(spanStr)
		_, ok := d.missPage[num]
		if ok {
			// This chapter number was recorded as missing — fill it in.
			d.missPage[num] = [2]string{spanStr, hrefStr}
		}

		body = body[stopIndex:] // advance past this li
	}

	Notice(d.missPage)
	// Rebuild ReqList (alternating [title, href, ...]) with resolved
	// chapters inserted after the chapter that precedes them.
	// NOTE(review): assumes len(info.ReqList) is even; an odd length would
	// panic on the i+1 access below — confirm the upstream invariant.
	var list []string
	for i := 0; i < len(info.ReqList); i += 2 {
		list = append(list, info.ReqList[i])
		list = append(list, info.ReqList[i+1])

		// If chapter N sits at position i, splice in N+1 when it was missing.
		n := getNumberToStr(info.ReqList[i]) + 1
		_, ok := d.missPage[n]
		if ok {
			// NOTE(review): if the re-scan never found this chapter, the
			// entry is still the zero value and two empty strings are
			// appended here (later skipped by downloadList's len<10 check) —
			// verify that is the intended behavior.
			list = append(list, d.missPage[n][0])
			list = append(list, d.missPage[n][1])
		}
	}
	info.ReqList = list
}

// getReqList fetches the comic home page, saves its HTML in d.body, and
// builds info.ReqList as an alternating [title, href, ...] slice. Small
// gaps (< 5) in the chapter numbering are recorded in d.missPage so
// paddReqList can resolve them later. Exits the process on any HTTP or
// parsing failure.
func (d *Dumanhhua) getReqList() {
	code, body, err := httpReq(info.ComicHome, "GET", map[string]string{
		"Accept":          "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
		"Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
		"Connection":      "keep-alive",
		"User-Agent":      "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
	}, nil) // 请求主网址内容
	if err != nil {
		Error("getReqList " + err.Error())
		os.Exit(1)
	}
	outFile("body.dat", body) // dump the raw page for debugging

	if code != http.StatusOK {
		Error(fmt.Sprintf("getReqList  code=%v url=%v  err=%s", code, info.ComicHome, body))
		os.Exit(1)
	}
	d.body = body

	// Skip ahead to the chapter-list anchor keyword.
	n := strings.Index(body, info.Key)
	if n < 1 {
		Noticef("\n%s", body)
		Error("关键字未找到")
		os.Exit(1)
	}
	body = body[n:]

	// Collect every (title, href) pair from the li elements.
	var spans []string
	var hrefs []string
	for {
		liStr, stopIndex := getString(body, d.li)
		if stopIndex < 0 { // no more li elements
			break
		}
		hrefStr, _ := getString(liStr, d.href)
		spanStr, _ := getString(liStr, d.span)
		if len(hrefStr) < 1 || len(spanStr) < 1 {
			break
		}

		hrefs = append(hrefs, hrefStr)
		spans = append(spans, spanStr)

		body = body[stopIndex:] // advance past this li
	}

	var tempReq []string
	lastNum := 0
	for n := 0; n < len(spans); n++ {
		i := n
		if *outPutMod == 2 { // mode 2 walks the list in reverse order
			Notice("反转")
			i = len(spans) - n - 1
		}
		// Skip promo ("活动") and notification ("通知") entries.
		if strings.Contains(spans[i], "活动") || strings.Contains(spans[i], "通知") {
			continue
		}
		now := getNumberToStr(spans[i])
		if now > 0 && now > lastNum {
			if now-lastNum == 1 {
				d.haveNum = d.ifUseNum // consecutive numbering confirmed (was: true && d.ifUseNum)
			} else if now-lastNum < 5 { // gaps smaller than 5 chapters get back-filled
				if d.missPage == nil {
					// Lazily initialize: the map is never made elsewhere in
					// this file, and writing to a nil map panics.
					d.missPage = make(map[int][2]string)
				}
				for j := lastNum + 1; j < now; j++ {
					d.missPage[j] = [2]string{}
				}
			}
			lastNum = now
		}

		tempReq = append(tempReq, spans[i], hrefs[i])
	}

	Noticef("总共：%v ", len(tempReq)/2)
	info.ReqList = tempReq
}
