package cats

import (
	"www2/sp"
	"github.com/jbowtie/gokogiri"
	"net/url"
	"log"
	"gopkg.in/mgo.v2/bson"
	"io/ioutil"
	"strings"
	"os"
	"www2/cfg"
)

// ToDocDest loads the cached HTML for url from the Shtml store and converts
// it into a *sp.Doc using the converter that matches fc.SubDomain.
// It returns (nil, err) when the page is not cached or conversion fails.
// If no converter matches the subdomain, an empty doc carrying only the
// cached record's metadata is returned (original behavior preserved).
func ToDocDest(url string, fc *sp.Spider) (*sp.Doc, error) {
	shtml := &sp.Shtml{}
	err := sp.ShtmlBongo.FindOne(bson.M{"furl": url}, shtml)
	if err != nil {
		return nil, err
	}

	tdoc := &sp.Doc{}
	switch fc.SubDomain {
	case sp.CD_mdn:
		tdoc, err = MdnHtmlToDocDest(shtml.Content, fc, url)
	case sp.CD_es:
		tdoc, err = ESToDocDest(shtml.Content, fc, url)
	case sp.CD_redis:
		tdoc, err = RsToDocDest(shtml.Content, fc, url)
	case sp.CD_w3:
		tdoc, err = W3ToDocDest(shtml.Content, fc, url)
	}
	if err != nil {
		return nil, err
	}

	// Carry the cached record's metadata over to the converted doc.
	tdoc.LM = shtml.LM
	tdoc.FUrl = shtml.FUrl
	tdoc.Id = shtml.Id
	tdoc.AfterFind(nil)

	return tdoc, nil
}

// MdnHtmlToDocDest parses a cached MDN page (str) and extracts its title,
// meta description and main content into a *sp.Doc. refurl is the page's
// own URL and is used to resolve relative links found in the content.
//
// Fixes vs. the previous version:
//   - url.Parse errors are no longer ignored (a nil linkURL would panic
//     inside ResolveReference); malformed hrefs are skipped instead.
//   - a failed link.SetContent no longer leaks its error into the final
//     return value after being logged and skipped.
func MdnHtmlToDocDest(str string, fc *sp.Spider, refurl string) (*sp.Doc, error) {
	refURL, err := url.Parse(refurl)
	if err != nil {
		return nil, err
	}
	titleXpath := "/html/head/title"
	descriptionXpath := `/html/head/meta[@name="description"]/@content`
	contentXpath := `/html/body/main/div/div/div`

	// Chrome-only page furniture that must be stripped from the content.
	excludeXpath := []string{
		`/html/body/main/div/div/div/div[2]/div[2]`,                //contributor-avatars
		`/html/body/main/div/div/div/div[1]/ul`,                    //class="page-buttons" /html/body/div/div[1]/ul
		`/html/body/main/div/div/div/div[3]/div/div[2]/div/div[2]`, //contributors-sub
		`/html/body/main/div/div/div/div[3]/div/div[2]/div/div[1]`, //class="tag-attach-list contributors-sub
	}
	htmldoc, err := gokogiri.ParseHtml([]byte(str))
	if err != nil {
		return nil, err
	}
	defer htmldoc.Free()

	doct := new(sp.Doc)

	if nodes, _ := htmldoc.Search(titleXpath); len(nodes) > 0 {
		doct.Title = nodes[0].Content()
	}
	if nodes, _ := htmldoc.Search(descriptionXpath); len(nodes) > 0 {
		doct.Des = nodes[0].String()
	}
	nodes, _ := htmldoc.Search(contentXpath)
	if len(nodes) > 0 {
		contentNode := nodes[0]
		for _, exclude := range excludeXpath {
			if exNodes, serr := contentNode.Search(exclude); serr == nil {
				for _, nod := range exNodes {
					nod.Remove()
				}
			}
		}
		links, _ := contentNode.Search(contentNode.Path() + "//a/@href")
		for _, link := range links {
			linkURL, perr := url.Parse(link.String())
			if perr != nil {
				// Malformed href: skip rather than deref a nil URL.
				continue
			}
			iURL := refURL.ResolveReference(linkURL)
			ok, _, etype := fc.ShouldFetched(iURL)

			// Off-site link: rewrite to its absolute form.
			if !ok && etype > sp.FT_ShouldFetch && etype != sp.FT_CrossDomain {
				if serr := link.SetContent(iURL.String()); serr != nil {
					log.Println(iURL, ok, etype)
					continue
				}
			}
			// Absolute in-site link: rewrite to a relative path.
			if ok && iURL.String() == link.String() {
				link.SetContent(sp.UrlRelativePath(iURL))
				log.Println("set RelativePath", linkURL)
			}
		}

		doct.Content = contentNode.String()
	}

	return doct, nil
}
// ShouldConvertLink reports whether the href rawUrl (resolved to fURL)
// should be rewritten in the rendered page, and if so, the replacement
// string to write into the attribute.
func ShouldConvertLink(fc *sp.Spider, fURL *url.URL, rawUrl string) (bool, string) {
	_, _, etype := fc.ShouldFetched(fURL)

	// etype values for reference:
	//   FT_NotHttp = -1, FT_ShouldFetch = 0, FT_CrossDomain = 1, FT_Fetched = 4
	switch {
	case etype == sp.FT_Exclude || etype == sp.FT_NotSubdir:
		// Excluded / out-of-subdir target: keep it as an absolute URL.
		return true, fURL.String()
	case (etype == sp.FT_Fetched || etype == sp.FT_ShouldFetch) && fURL.String() == rawUrl:
		// Absolute in-site link: convert it to a relative path.
		return true, sp.UrlRelativePath(fURL)
	default:
		return false, ""
	}
}

// ConvertTempalteStaic rewrites a static template file at fpath:
//   - <script src> and <link href> assets are downloaded (if not cached)
//     and repointed at the configured image/CDN domain;
//   - <a href> links are converted via ShouldConvertLink.
//
// The result is written next to the input as fpath+".new".
// (Name typo is kept for backward compatibility with existing callers.)
func ConvertTempalteStaic(fc *sp.Spider, fpath string) {
	onlyShowLink := false
	bs, err := ioutil.ReadFile(fpath)
	if err != nil {
		// Previously ignored; a missing file would only fail later in ParseHtml.
		log.Fatalln(err)
	}

	htmldoc, err := gokogiri.ParseHtml(bs)
	if err != nil {
		log.Fatalln(err)
	}
	defer htmldoc.Free()

	jss, _ := htmldoc.Search("//script/@src")
	links, _ := htmldoc.Search("//link/@href")
	hrefs, _ := htmldoc.Search("//a/@href")

	refURL, _ := url.Parse(fc.Url)

	// script src and link href receive identical treatment, so handle
	// them in one loop instead of two duplicated bodies.
	for _, res := range append(jss, links...) {
		if onlyShowLink {
			log.Println(res.String())
			continue
		}
		linkURL, perr := url.Parse(res.String())
		if perr != nil {
			// Malformed attribute value: skip rather than deref a nil URL.
			continue
		}
		iURL := refURL.ResolveReference(linkURL)

		if iURL.Host == "apps.bdimg.com" || iURL.Host == "res.quickdoc.cn" {
			continue
		}
		// Download the asset if it is not already cached locally.
		sp.DownLoadImgWithCheck(iURL.String())
		res.SetContent("http://" + *cfg.ImageDomain + "/" + iURL.String())
	}
	for _, res := range hrefs {
		if onlyShowLink {
			log.Println(res.String())
			continue
		}
		linkURL, perr := url.Parse(res.String())
		if perr != nil {
			continue
		}
		iURL := refURL.ResolveReference(linkURL)

		// Leave CDN hosts and same-domain links untouched.
		if iURL.Host == "apps.bdimg.com" || iURL.Host == "res.quickdoc.cn" || strings.HasSuffix(iURL.Host, *cfg.Domian) {
			continue
		}
		ok, cto := ShouldConvertLink(fc, iURL, res.String())
		log.Println(ok, cto, res.String())
		if ok {
			res.SetContent(cto)
		}
	}
	if onlyShowLink {
		return
	}
	if err := ioutil.WriteFile(fpath+".new", []byte(htmldoc.String()), os.ModePerm); err != nil {
		log.Println(err)
	}
	log.Println("finish")
}

// GetMdnWebSpider builds and initializes the spider configured to crawl
// the Chinese MDN Web docs subtree.
func GetMdnWebSpider() *sp.Spider {
	// Renamed from "url": the old local shadowed the imported net/url package.
	const seedURL = "https://developer.mozilla.org/zh-CN/docs/Web/"
	fc := &sp.Spider{
		Url:     seedURL,
		OnlySub: true,
		// Skip locale/edit/history/translate variants of each page.
		ExcludeUrls: []string{"$locales", "$edit", "$history", "$translate"},
		SubDomain:   sp.CD_mdn,
	}
	fc.Init()
	return fc
}


// GetMdnData runs a full crawl of the MDN Web docs using the
// pre-configured spider from GetMdnWebSpider. Results are persisted by
// the spider itself; nothing is returned. The commented-out lines below
// are earlier experiments kept for reference.
func GetMdnData(){
	//url:="https://developer.mozilla.org/zh-CN/docs/Web/JavaScript/Reference/Functions/Arrow_functions"
	//	url:="https://developer.mozilla.org/en-US/docs/Web/JavaScript/"
	//	url:="https://developer.mozilla.org/zh-CN/docs/Web/JavaScript/"
	//	url:="https://developer.mozilla.org/zh-CN/docs/Web/"
	fc:=GetMdnWebSpider()
	fc.StartCrawl1(false)

	//	url:="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/isArray"
	//	geted:=m.GetUrlTask(url)
	//	log.Println(geted)
	//	if geted{}
	//	doc,err:=m.GetHttpDataWithCache(url)

	//extract url
	//	log.Println(err,*m.ExtractXpathContent([]byte(doc.Content),`//*[@id="wiki-right"]`))
}