package sp
import (
	"net/http"
	"compress/gzip"
	"io"
	"io/ioutil"
//	"log"

//	"github.com/lestrrat/go-libxml2"
//	"github.com/lestrrat/go-libxml2/types"
//	"github.com/lestrrat/go-libxml2/xpath"
//	"github.com/lestrrat/go-libxml2/parser"

//	"github.com/lestrrat/go-libxml2/types"
	"github.com/jbowtie/gokogiri"
	"log"
	"github.com/djimenez/iconv-go"
	"fmt"
	"strings"
	"www2/util"
	"gopkg.in/mgo.v2/bson"
	"net/url"
	"errors"
	"bytes"
	"sync"
	"crypto/tls"
	"github.com/jinzhu/gorm"
	"path"
	//"go/doc"
	"time"
)

// Spider crawls a single site starting from Url, storing fetched pages
// and resources through the package-level Mongo/bongo handles.
// Call Init before any crawl to populate the derived lowercase fields.
type Spider struct {
	FetchedCounter int      // number of URLs claimed by this run (see GetUrlTask4Spider)
	Mysql          *gorm.DB // MySQL handle; not used in this file

	Url         string   // start URL, e.g. https://developer.mozilla.org/en-US/docs/Web
	OnlySub     bool     // when true, only crawl paths under the start URL's directory
	ExcludeUrls []string // path substrings that must not be crawled
	LinksTest   bool     // when true, discovered links are logged instead of crawled
	SubDomain   string   // NOTE(review): set by callers elsewhere? unused in this file
	schema      string   // scheme of Url ("http"/"https"), filled by Init
	domain      string   // host of Url, filled by Init
	siturl      string   // scheme://host, filled by Init
	basepath    string   // directory of Url's path, filled by Init; used by ShouldFetch
}


// Init derives schema, domain, siturl and basepath from fc.Url.
// The process exits if fc.Url cannot be parsed.
func (fc *Spider) Init() {
	parsed, err := url.Parse(fc.Url)
	if err != nil {
		log.Fatal(err)
	}
	fc.domain = parsed.Host
	fc.schema = parsed.Scheme
	fc.siturl = parsed.Scheme + "://" + parsed.Host
	// basepath is the directory of the start path; ShouldFetch uses it
	// when OnlySub is set.
	fc.basepath = path.Dir(parsed.Path)
}
// GetShtmlStat reports how many cached pages have a furl starting with fc.Url.
// Lookup errors are ignored and reported as zero.
func (fc *Spider) GetShtmlStat() int {
	pattern := bson.RegEx{Pattern: "^" + fc.Url}
	n, _ := ShtmlMgo.Find(bson.M{"furl": bson.M{"$regex": pattern}}).Count()
	return n
}


func  UrlStr(u *url.URL) string {
	var buf bytes.Buffer
	buf.WriteString(u.Scheme)
	buf.WriteByte(':')
	buf.WriteString("//")
	buf.WriteString(u.Host)
	if u.Path==""{
		buf.WriteByte('/')
	}
	buf.WriteString(u.Path)
	if u.RawQuery != "" {
		buf.WriteByte('?')
		buf.WriteString(u.RawQuery)
	}
	return buf.String()
}
func  UrlPathAndQuery(u *url.URL) string {
	var buf bytes.Buffer
	buf.WriteString(u.Path)
	if u.RawQuery != "" {
		buf.WriteByte('?')
		buf.WriteString(u.RawQuery)
	}
	return buf.String()
}
func  UrlRelativePath(u *url.URL) string {
	var buf bytes.Buffer
	buf.WriteString(u.Path)
	if u.RawQuery != "" {
		buf.WriteByte('?')
		buf.WriteString(u.RawQuery)
	}
	if u.Fragment != "" {
		buf.WriteByte('#')
		buf.WriteString(u.Fragment)
	}
	return buf.String()
}


var httpClient *http.Client = &http.Client{
	Transport:
	&http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true,
		}, // disable verify
		TLSHandshakeTimeout:0,
		//		ResponseHeaderTimeout
	},
}

// downLoadImg fetches a single static resource over HTTP and returns it as
// an SImg record (URL, Last-Modified, Content-Type, body bytes).
// The caller is responsible for persisting the result.
func downLoadImg(url string) (*SImg, error) {
	request, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	res, err := httpClient.Do(request)
	if err != nil {
		log.Println("url get error", url)
		return nil, err
	}
	defer res.Body.Close()

	// Don't store error pages (404s etc.) as if they were images.
	if res.StatusCode < 200 || res.StatusCode > 299 {
		return nil, fmt.Errorf("downLoadImg %s: unexpected status %s", url, res.Status)
	}

	img := new(SImg)
	img.FUrl = url
	if lm := res.Header.Get("Last-Modified"); lm != "" {
		img.LM = lm
	}
	img.CT = res.Header.Get("Content-Type")
	bs, err := ioutil.ReadAll(res.Body) // error was silently dropped before
	if err != nil {
		return nil, err
	}
	img.Content = bs
	return img, nil
}

// getHttpHtmlDataWithGzip fetches a page, transparently decompressing a
// gzip-encoded body, and wraps it in a Shtml record (content, Last-Modified,
// Content-Type). FUrl is left for the caller to fill in.
// For https with bad certificates see:
// http://stackoverflow.com/questions/12122159/golang-how-to-do-a-https-request-with-bad-certificate
func getHttpHtmlDataWithGzip(u *url.URL) (*Shtml, error) {
	request, err := http.NewRequest("GET", UrlStr(u), nil)
	if err != nil {
		return nil, err
	}
	request.Header.Add("Accept-Encoding", "gzip")
	response, err := httpClient.Do(request)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	// Only decompress when the server actually sent compressed data.
	var reader io.ReadCloser
	switch response.Header.Get("Content-Encoding") {
	case "gzip":
		reader, err = gzip.NewReader(response.Body)
		if err != nil {
			fmt.Println(err)
			return nil, err
		}
		defer reader.Close()
	default:
		reader = response.Body
	}

	doc := new(Shtml)
	if lm := response.Header.Get("Last-Modified"); lm != "" {
		doc.LM = lm
	}
	doc.CT = response.Header.Get("Content-Type")

	// was: bytes,_:=ioutil.ReadAll(...) — shadowed the bytes package and
	// ignored the read error.
	body, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	// Charset handling: everything is currently treated as utf-8; the
	// gb2312 conversion is kept for sites that need it (flip isutf8).
	isutf8 := true
	if !isutf8 {
		out := make([]byte, len(body))
		conv, _ := iconv.NewConverter("gb2312", "utf-8")
		conv.Convert(body, out)
		doc.Content = string(out)
	} else {
		doc.Content = string(body)
	}

	return doc, nil
}

// Source codes returned by GetHttpHtmlDataWithCache: where a page came from.
const(
	FS_Cache=1 // served from the Mongo cache
	FS_Web=0 // fetched from the live site
)
// Used when downloading image/css resources.
var resUrlLock sync.Mutex
// GetUrlTask4Res atomically claims a resource URL for download via Redis
// HSetNX on the "tasks" hash. It returns true when this caller won the
// claim, false when another worker already holds it.
// A Redis error aborts the process.
func  GetUrlTask4Res(url string) bool{
	resUrlLock.Lock()
	defer resUrlLock.Unlock()
	// HSetNX is atomic in Redis; the mutex additionally serializes local callers.
	ok,err:=util.Client.HSetNX("tasks",url,"1").Result()
	if err!=nil{
		log.Fatal("GetUrlTask:",err)
	}
	return ok;
}
// DownLoadImgWithCheck downloads and stores a static resource exactly once:
// it first claims the URL in Redis, skips anything already present in Mongo,
// and only then fetches and saves it. Errors are logged, never returned.
func DownLoadImgWithCheck(url string) {
	if !GetUrlTask4Res(url) {
		return // another worker already claimed this URL
	}
	count, err := SImgMgo.Find(bson.M{"furl": url}).Count()
	if err != nil {
		log.Println(err, url)
		return
	}
	if count > 0 {
		return // already stored
	}
	log.Println("download Static", url)
	img, err := downLoadImg(url)
	if err != nil {
		log.Println(err, url)
		return
	}
	// The original assigned Save's error but never checked it.
	if err := SImgBongo.Save(img); err != nil {
		log.Println(err, url)
		return
	}
	log.Println("saved ", img.FUrl, url)
}
// GetHttpHtmlDataWithCache returns the page for url, preferring the Mongo
// cache and falling back to a live fetch (which is then cached).
// The int result is FS_Cache or FS_Web, telling the caller where it came from.
func GetHttpHtmlDataWithCache(url *url.URL) (*Shtml, int, error) {
	doc := &Shtml{}
	// BUG FIX: documents are stored keyed by the canonical string form
	// (doc.FUrl = UrlStr(url) below), but the lookup queried with the
	// *url.URL value itself, so the cache could never hit. Also pass doc
	// (a *Shtml) rather than &doc (a **Shtml) for decoding.
	err := ShtmlBongo.FindOne(bson.M{"furl": UrlStr(url)}, doc)
	if err == nil {
		return doc, FS_Cache, nil
	}
	if "Document not found" != err.Error() {
		log.Println("err fetch fromCache:", url, err)
	}

	doc, err = getHttpHtmlDataWithGzip(url)
	if err == nil {
		doc.FUrl = UrlStr(url)
		ShtmlBongo.Save(doc)
		return doc, FS_Web, nil
	}
	return nil, FS_Web, err
}

// Reason codes returned by ShouldFetch / ShouldFetched.
const(
	FT_NotHttp =-1 // scheme is not http/https
	FT_ShouldFetch =0 // eligible for crawling
	FT_CrossDomain=1 // host differs from the spider's domain
	FT_Exclude=2 // path matches an ExcludeUrls entry
	FT_NotSubdir=3 // OnlySub set and path is outside basepath
	FT_Fetched=4 // already present in the Mongo cache
)
// ShouldFetch decides whether furl is eligible for crawling: it must be
// http(s), on the spider's own domain, not match any ExcludeUrls entry,
// and — when OnlySub is set — live under the start URL's directory
// (sub-directories only; no cross-domain). The int result is an FT_* code.
func (fc *Spider) ShouldFetch(furl *url.URL) (bool, error, int) {
	if !strings.HasPrefix(furl.Scheme, "http") {
		return false, errors.New("not http protocol"), FT_NotHttp
	}
	if furl.Host != fc.domain {
		return false, errors.New("cross domain  " + furl.String()), FT_CrossDomain
	}
	for _, pattern := range fc.ExcludeUrls {
		if strings.Contains(furl.Path, pattern) {
			return false, errors.New("ExcludeUrl:" + furl.String()), FT_Exclude
		}
	}
	if fc.OnlySub && !strings.HasPrefix(furl.Path, fc.basepath) {
		return false, errors.New("not subdir " + furl.String()), FT_NotSubdir
	}
	return true, nil, FT_ShouldFetch
}

// ShouldFetched is ShouldFetch plus a cache check: a URL already stored in
// Mongo reports (true, nil, FT_Fetched) without re-evaluating the rules.
func (fc *Spider) ShouldFetched(furl *url.URL) (bool, error, int) {
	if n, err := ShtmlMgo.Find(bson.M{"furl": UrlStr(furl)}).Count(); err == nil && n > 0 {
		return true, nil, FT_Fetched
	}
	return fc.ShouldFetch(furl)
}

var urlLock sync.Mutex

// GetUrlTask4Spider atomically claims a page URL for crawling via Redis
// HSetNX on the "tasks" hash, bumping FetchedCounter on success. In
// LinksTest mode every URL counts as claimed so links are only logged.
// A Redis error aborts the process.
func (m *Spider) GetUrlTask4Spider(url *url.URL) bool {
	if m.LinksTest {
		return true
	}
	urlLock.Lock()
	defer urlLock.Unlock()
	claimed, err := util.Client.HSetNX("tasks", UrlStr(url), "1").Result()
	if err != nil {
		log.Fatal("GetUrlTask:", err)
	}
	// Keep the task hash from accumulating forever between runs.
	util.Client.Expire("tasks", time.Hour*10)
	if claimed {
		m.FetchedCounter += 1
	}
	return claimed
}

// continueCrawl re-seeds the crawl from the 100 most recently stored pages
// whose furl begins with m.Url, re-crawling each to pick up new links.
func (m *Spider) continueCrawl() {
	pattern := bson.RegEx{Pattern: "^" + m.Url}
	log.Println("continueCrawl start", m.Url, pattern)

	iter := ShtmlMgo.Find(bson.M{"furl": bson.M{"$regex": pattern}}).
		Select(bson.M{"furl": 1}).
		Sort("-_created").
		Limit(100).
		Iter()
	row := map[string]string{}
	for iter.Next(row) {
		// BUG FIX: the parse error was discarded, so a malformed stored
		// furl passed a nil *url.URL into crawl1 (panic in UrlStr).
		seed, err := url.Parse(row["furl"])
		if err != nil {
			log.Println("continueCrawl: bad stored furl:", row["furl"], err)
			continue
		}
		m.crawl1(seed, nil)
	}
	log.Println("continueCrawl end")
}

var tasks chan string // bounded-concurrency semaphore used by crawl1
var wg sync.WaitGroup // tracks in-flight crawl goroutines

// StartCrawl1 kicks off the crawl from m.Url and blocks until every
// spawned crawl goroutine has finished. With continu=true it resumes
// from recently cached pages instead of the start URL.
// The process exits if m.Url is unparseable or has an empty path.
func (m *Spider) StartCrawl1(continu bool) {
	log.Println("begin fetch")
	m.FetchedCounter = 0

	// At most 8 pages are fetched concurrently (see crawl1).
	tasks = make(chan string, 8)

	beginUrl, err := url.Parse(m.Url)
	if err != nil {
		log.Fatalln(err)
	}
	if beginUrl.Path == "" {
		log.Fatalln("path error, eg: http://www.baidu.com must be http://www.baidu.com/")
	}

	if continu {
		m.continueCrawl()
	} else {
		m.crawl1(beginUrl, nil)
	}
	log.Println("begin Wait")
	wg.Wait()
	log.Println("end fetch")
}

// crawl1 fetches furl (rurl is the referring page, may be nil), then
// extracts every <a href> and recursively crawls each eligible link in its
// own goroutine. The buffered tasks channel caps concurrent fetches; wg
// lets StartCrawl1 wait for the whole crawl tree. Fetch failures are
// recorded in the Redis "tasks_error" hash for RetryErrUrls.
func (m *Spider) crawl1(furl, rurl *url.URL) {
	wg.Add(1)
	defer wg.Done()

	tasks <- "" // acquire a fetch slot
	doc, s, err := GetHttpHtmlDataWithCache(furl)
	<-tasks // release the slot before parsing/recursing

	if err != nil {
		msg := fmt.Sprintln("url fetched err:", err, ",source:", s, UrlStr(furl), ",rurl:", rurl)
		log.Println(msg)
		util.Client.HSet("tasks_error", UrlStr(furl), msg)
		return
	}

	log.Println("url fetched", s, ":", furl, "counter:", m.FetchedCounter)
	htmldoc, err := gokogiri.ParseHtml([]byte(doc.Content))
	if err != nil {
		// BUG FIX: Free() used to be deferred before this check, so a
		// parse failure dereferenced a nil document.
		return
	}
	defer htmldoc.Free()

	links, err := htmldoc.Search("//a/@href")
	if err != nil {
		return
	}
	for _, v := range links {
		vurl, err := url.Parse(v.String())
		if err != nil || vurl == nil {
			continue
		}
		iurl := furl.ResolveReference(vurl)
		if ok, _, _ := m.ShouldFetch(iurl); !ok {
			continue
		}
		if !m.GetUrlTask4Spider(iurl) {
			continue // another worker owns this URL
		}
		if m.LinksTest {
			log.Println(UrlStr(iurl))
			continue
		}
		go m.crawl1(iurl, furl) // iurl is loop-local, safe to capture
	}
}
// RetryErrUrls re-fetches every URL recorded in the Redis "tasks_error"
// hash, removing entries that now succeed. URLs that still fail (or cannot
// be parsed) stay queued for the next retry pass.
func RetryErrUrls() {
	log.Println("begin RetryErrUrls")
	urls, err := util.Client.HKeys("tasks_error").Result()
	if err != nil {
		// The original ignored this error and silently iterated nothing.
		log.Println("RetryErrUrls: cannot read tasks_error:", err)
		return
	}
	for idx, curl := range urls {
		cURL, perr := url.Parse(curl)
		if perr != nil {
			log.Println("url fetched err:", perr, curl)
			continue
		}
		if _, s, ferr := GetHttpHtmlDataWithCache(cURL); ferr == nil {
			util.Client.HDel("tasks_error", curl)
			log.Println("url fetched :", curl, ",source:", s, "idx:", idx)
		}
	}
	log.Println("end RetryErrUrls", err, len(urls))
}


//http://es.quickdoc.cn/guide/en/elasticsearch/reference/current/search-template.html



