package funmanga

import (
	"fmt"
	"git.oschina.net/gladmo/manga/model"
	"git.oschina.net/gladmo/manga/spider"
	"git.oschina.net/gladmo/manga/tools"
	xmlpath "gopkg.in/xmlpath.v2"
	"io"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"time"
)

// Package-level configuration for the funmanga.com spider.
var (
	// baseUrl is the crawl entry point (latest-chapters listing page).
	baseUrl = "http://www.funmanga.com/latest-chapters/"

	// coverPath is the directory where cover images are saved.
	coverPath = "manga/funmanga/cover/"

	// mangaPath is the directory where chapter page images are saved.
	mangaPath = "manga/funmanga/manga/"

	// site is the short code identifying this source in crawl logs.
	site = "FM"
)

// Funmanga crawls the latest-chapters listing of funmanga.com and
// downloads any manga info and chapters that are not stored yet.
// It returns false when the listing page cannot be fetched.
func Funmanga() bool {
	// Fetch the first page of the entry URL.
	resp, err := http.Get(baseUrl)
	if err != nil {
		// Check the error before touching resp: resp is nil on failure,
		// so deferring resp.Body.Close() first would panic.
		fmt.Println(err)
		return false
	}
	defer resp.Body.Close()

	// Build the code -> {name, url} map and process every entry.
	mList := getMangas(resp.Body)
	getMangaInfo(mList)
	return true
}

// getMangas parses the listing page and returns a map from manga code
// (tools.Encode of the trimmed name) to its {name, url} entry.
// On a parse failure it logs the error and returns an empty map.
func getMangas(body io.ReadCloser) map[string]spider.MangaList {
	mangaList := make(map[string]spider.MangaList)

	node, err := xmlpath.ParseHTML(body)
	if err != nil {
		// Without a parsed document the xpath queries below would run
		// against a nil node; return the empty map instead.
		fmt.Println(err)
		return mangaList
	}

	// Compile the xpath expressions once, outside the loop
	// (the original recompiled @href and text() per iteration).
	mangaUrl := xmlpath.MustCompile("//dt/a")
	href := xmlpath.MustCompile("@href")
	title := xmlpath.MustCompile("text()")

	for it := mangaUrl.Iter(node); it.Next(); {
		url, _ := href.String(it.Node())
		n, _ := title.String(it.Node())
		name := strings.TrimSpace(n)

		// code is the stable map key derived from the manga name.
		code := tools.Encode(name)
		mangaList[code] = spider.MangaList{Name: name, Url: url}
	}
	return mangaList
}

// getMangaInfo walks every manga in list: it stores base info for
// manga not yet in the database, then downloads any missing chapters.
func getMangaInfo(list map[string]spider.MangaList) {
	for k, v := range list {
		// Delegate to a helper so each response body is closed at the
		// end of its own iteration; the original deferred Close inside
		// this loop, holding every body open until function return.
		processManga(k, v)
	}
}

// processManga handles a single manga: it ensures the base info row
// exists, then fetches the chapter list and crawls chapters that are
// not stored yet.
func processManga(k string, v spider.MangaList) {
	// Save the base info first when the manga is unknown.
	if !spider.HasManga(k) {
		info, err := http.Get(v.Url)
		if err != nil {
			// The original ignored this error and would panic on a
			// nil response; skip this manga instead.
			fmt.Println(err)
			return
		}
		saveMangaInfo(info.Body, k, v.Name)
		info.Body.Close()
	}

	// Fetch the detail page again for the chapter list.
	info, err := http.Get(v.Url)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer info.Body.Close()

	fmt.Println("get chapters")
	urls, titles := getChapters(info.Body)
	for key, value := range urls {
		// The chapter number is the last "-"-separated token of the title.
		titleArr := strings.Split(titles[key], "-")
		no := strings.TrimSpace(titleArr[len(titleArr)-1])
		fNo := tools.FormatNo(no)

		// Skip chapters that are already stored.
		manga_id := model.FindManga(k)
		if spider.HasChapter(manga_id, fNo) {
			continue
		}
		pageNo, ok := crawl(value, k, tools.ParseNo(fNo))
		if !ok {
			continue
		}

		// Persist the chapter row.
		chapter := model.Chapter{
			Manga_id:    manga_id,
			Title:       titles[key],
			No:          fNo,
			Total_pages: pageNo,
			Entry_time:  time.Now().Unix(),
		}
		c, _ := model.AddChapter(chapter)

		// Refresh the manga's chapter metadata.
		model.UpdateManga(manga_id)

		// Record the crawl in the log table.
		log := model.Log{
			Manga_id:   manga_id,
			Chapter_id: c.Chapter_id,
			Site:       site,
			Code:       k,
			Title:      titles[key],
			Url:        value,
		}
		model.AddLog(log)
	}
}

// crawl downloads every page image of one chapter into
// mangaPath/<code>/<no>/ and returns the number of pages saved.
// ok is false when the chapter page cannot be fetched or parsed.
func crawl(url string, code string, no string) (pageNo int64, ok bool) {
	// "/all-pages" serves the whole chapter on a single page.
	page, err := http.Get(url + "/all-pages")
	if err != nil {
		// Check the error before touching page: page is nil on failure,
		// so deferring page.Body.Close() first would panic.
		fmt.Println(err)
		return 0, false
	}
	defer page.Body.Close()

	node, err := xmlpath.ParseHTML(page.Body)
	if err != nil {
		// The original never checked this error and would iterate a
		// nil document.
		fmt.Println(err)
		return 0, false
	}

	// Every chapter page image carries class "img-responsive".
	imgs := xmlpath.MustCompile("//img[@class='img-responsive']/@src")
	it := imgs.Iter(node)

	dir := mangaPath + code + "/" + no + "/"
	fmt.Println(dir)

	// Download each page image, numbering them from 1.
	pageNo = 0
	for it.Next() {
		pageNo++
		tools.GetChapterImg(dir, it.Node().String(), strconv.FormatInt(pageNo, 10))
	}

	return pageNo, true
}

// saveMangaInfo parses a manga detail page, downloads the cover, and
// stores a new Manga row via model.AddManga. It returns false when the
// page cannot be parsed, otherwise the result of model.AddManga.
func saveMangaInfo(body io.Reader, code string, title string) bool {
	node, err := xmlpath.ParseHTML(body)
	if err != nil {
		// Without a parsed document every xpath lookup below would run
		// against a nil node; bail out instead.
		fmt.Println(err)
		return false
	}

	// The <dl class="dl-horizontal"> block holds the base info; the
	// n-th <dd> maps to a fixed attribute name.
	baseInfo := xmlpath.MustCompile("//dl[@class='dl-horizontal']")
	info := baseInfo.Iter(node)
	infoMap := map[string]string{
		"1": "altname",
		"2": "status",
		"3": "categories",
		"4": "language",
		"5": "author",
		"6": "artist",
	}

	// Collect the base info into dataMap, keyed by attribute name.
	dataMap := make(map[string]string)
	if info.Next() {
		// Compiled once here instead of inside the loop.
		wsRe := regexp.MustCompile(`\s+`)
		for k, v := range infoMap {
			dd := xmlpath.MustCompile("dd[" + k + "]")
			value, ok := dd.String(info.Node())
			if !ok {
				value = ""
			}
			// Categories ("3") come with embedded newlines and runs of
			// spaces; collapse all whitespace to single spaces.
			if value != "" && k == "3" {
				value = wsRe.ReplaceAllString(value, " ")
			}
			dataMap[v] = strings.TrimSpace(value)
		}
	}

	// Cover image URL (empty string when absent).
	cPath := xmlpath.MustCompile("//div[@class='col-md-4']/img/@src")
	cover, ok := cPath.String(node)
	if !ok {
		cover = ""
	}
	dataMap["cover"] = cover

	// Manga description / summary (empty string when absent).
	summaryPath := xmlpath.MustCompile("//div[@class='note note-default margin-top-15']/p/text()")
	summary, ok := summaryPath.String(node)
	if !ok {
		summary = ""
	}
	dataMap["summary"] = summary

	// Download the cover; on success the stored value becomes the
	// local file name returned by tools.GetCover.
	fileName, err := tools.GetCover(coverPath, dataMap["cover"])
	if err != nil {
		fmt.Println(err)
	}
	dataMap["cover"] = fileName

	// Persist the assembled record.
	manga := model.Manga{
		Title:      title,
		Code:       code,
		Altname:    dataMap["altname"],
		Cover:      dataMap["cover"],
		Summary:    dataMap["summary"],
		Status:     dataMap["status"],
		Language:   dataMap["language"],
		Author:     dataMap["author"],
		Artist:     dataMap["artist"],
		Categories: dataMap["categories"],
	}
	return model.AddManga(manga)
}

// getChapters extracts the chapter links and titles from a manga
// detail page. The two returned slices are parallel: urls[i]
// corresponds to titles[i]. On a parse failure both are nil.
func getChapters(body io.ReadCloser) ([]string, []string) {
	node, err := xmlpath.ParseHTML(body)
	if err != nil {
		// No document, no chapters: return nil slices rather than
		// running xpath queries against a nil node.
		fmt.Println(err)
		return nil, nil
	}

	// Chapter links live in <ul><li><a> under #chapter_list.
	url := xmlpath.MustCompile("//*[@id='chapter_list']/ul/li/a/@href")
	var urls []string
	for uit := url.Iter(node); uit.Next(); {
		urls = append(urls, uit.Node().String())
	}

	// The first <span> inside each link holds the chapter title.
	title := xmlpath.MustCompile("//*[@id='chapter_list']/ul/li/a/span[1]")
	var titles []string
	for tit := title.Iter(node); tit.Next(); {
		titles = append(titles, strings.TrimSpace(tit.Node().String()))
	}

	return urls, titles
}
