// Cleans up the bytes already slurped from an html file.
//
// example:
//  // slurp up the bytes from an html file somehow.
//  ...
//  // clean up the dirty html
//  var scrubber *Scrubber
//  scrubber = scrub.NewScrubber(bytesFromTheHtmlFile, urlOfTheHtmlFile)
//  scrubber.Scrub()
//  // Then get a node.
//  reader := bytes.NewReader(scrubber.Bytes())
//  tree, err := xmlpath.ParseHTML(reader)
//  if err != nil {
//  	// handle error
//  }
//  compiled, err := xmlpath.Compile("/html/body/p")
//  if err != nil {
//  	// handle error.
//  } else {
//  	iter := compiled.Iter(tree)
//  	for iter.Next() {
//  		node1 := iter.Node()
//  // Get the node's id.
//  		compiled2, err := xmlpath.Compile("@" + data.AWWWEAttributeId)
//  		if err != nil {
//  			// handle error.
//  		} else {
//  			iter2 := compiled2.Iter(node1)
//  			if iter2.Next() {
//  				node2 := iter2.Node()
//  				idString := node2.String()
//  				idInt, ok := data.StringToInt(idString)
//  				if !ok {
//  					// handle error.
//  				} else {
//  // Use the id to get the node from the scrubber's tree
//  					scrubbernode, ok := scrubber.GetNodeFromId(idInt)
//  					if !ok {
//  						// handle error.
//  					}
//  // Get the nodes voice string.
//  					voice := scrubbernode.VoiceString()
//  // Or set the nodes visibility
//  					scrubbernode.SetVisibility(false)
//  				}
//  			}
//  		}
//  	}
//  }
package scrub

import (
	"bytes"
	"unicode/utf16"
	"unicode/utf8"

	"code.google.com/p/go.text/unicode/norm"

	"code.google.com/awwwe-browser/data"
	"code.google.com/awwwe-browser/scrub/htmlsoup"
)

// htmlbytes is the tag name compared against when detecting a duplicate
// <html> start tag in scrubHTML.  (The explicit []byte type was redundant
// with the initializer and has been dropped.)
var htmlbytes = []byte("html")

// This struct is used for cleaning up markup.
// A Scrubber holds the parse state while the dirty html bytes are converted
// into a tree of htmlsoup.SoupNodes.  Create one with NewScrubber, then call
// Scrub before using any of the accessor methods.
type Scrubber struct {
	// sourceBytes is the remaining unparsed input; scrubHTML consumes it from the front.
	sourceBytes                   []byte
	// tree is the document root node and currentParent is the node new
	// children are attached to.  htmlNode is only initialized to nil in this
	// file chunk; presumably it is set when the <html> tag is handled
	// elsewhere — confirm (scrubHTML only reads it).
	tree, htmlNode, currentParent *htmlsoup.SoupNode
	// IDMap maps a node's numeric id to the node; see GetNodeFromId.
	IDMap                         map[int]*htmlsoup.SoupNode
	// scrubbed records that Scrub has already done its work.
	scrubbed                      bool
}

// Creates a pointer to a new Scrubber struct.
// Param dirtyBytes is the bytes from the html web page; they are normalized
// with NormalizeBytes before being stored.
// Param location is the url of the html web page.
func NewScrubber(dirtyBytes []byte, location string) *Scrubber {
	s := &Scrubber{
		sourceBytes: NormalizeBytes(dirtyBytes),
		tree:        htmlsoup.NewDocumentNode(location),
		IDMap:       make(map[int]*htmlsoup.SoupNode),
		// scrubbed and htmlNode take their zero values (false, nil).
	}
	// Parsing starts by attaching children directly to the document root.
	s.currentParent = s.tree
	return s
}

// The Scrubber must come from the func NewScrubber so that it is initialized with the bytes from the dirty html file.
// Scrub converts the bytes from the dirty html file to text and tags.
// It builds the text and tags into a tree, moving them around as needed and
// marking certain nodes as invalid or not suitable for output as bytes or strings.
// It repeatedly consumes tokens from the front of the input and stops once
// the input is exhausted or a pass fails to consume any bytes.
func (s *Scrubber) Scrub() {
	if s.scrubbed || len(s.sourceBytes) == 0 || s.tree == nil {
		return
	}
	prev := -1
	for {
		remaining := len(s.sourceBytes)
		if remaining == prev || remaining == 0 {
			// Either the last pass made no progress or nothing is left.
			break
		}
		prev = remaining
		s.scrubHTML()
		s.scrubbed = true
	}
}

// SetTreeVisibility sets the visibility of the whole scrubbed tree.
// Param visible is the visibility to apply to the root node.
// Guards against a nil tree for consistency with Bytes and String, which
// both tolerate a nil tree instead of panicking.
func (s *Scrubber) SetTreeVisibility(visible bool) {
	if s.tree == nil {
		return
	}
	s.tree.SetVisibility(visible)
}

// Use this only after Scrub()
// Returns the prefix of the first tag.
// Returns the empty string when there is no tree, consistent with the
// nil-tree handling in Bytes and String (previously this would panic).
func (s *Scrubber) DefaultTagPrefix() string {
	if s.tree == nil {
		return data.EmptyString
	}
	return s.tree.DefaultTagPrefix()
}

// Use this only after Scrub()
// Returns a byte slice representation of the tree built with Scrub().
// A nil tree yields data.EmptyBytes.
func (s *Scrubber) Bytes() []byte {
	if s.tree == nil {
		return data.EmptyBytes
	}
	return s.tree.Bytes()
}

// Use this only after Scrub()
// Returns a string representation of the tree built with Scrub().
// A nil tree yields data.EmptyString.
func (s *Scrubber) String() string {
	if s.tree == nil {
		return data.EmptyString
	}
	return s.tree.String()
}

// GetNodeFromId looks up a scrubbed node by its numeric id.
// Param id is a node's data.AWWWEAttributeId converted to an int.
// The second return value reports whether the id was found.
// See the above example in the package overview.
func (s *Scrubber) GetNodeFromId(id int) (*htmlsoup.SoupNode, bool) {
	n, found := s.IDMap[id]
	return n, found
}

// Returns bb without the non utf-8 bytes.
// Param bb is the bytes that are untrusted.
// Input starting with a UTF-16 byte order mark (0xFE 0xFF big-endian,
// 0xFF 0xFE little-endian) is decoded from UTF-16 first; all other input is
// treated as UTF-8.  Invalid runes are dropped and the result is normalized
// to Unicode NFC.
func NormalizeBytes(bb []byte) []byte {
	obfr := bytes.NewBuffer(data.EmptyBytes)
	// BUG FIX: the previous code indexed bb[0]/bb[1] unconditionally,
	// panicking on input shorter than two bytes.
	// (The old names were also swapped: 0xFEFF is the big-endian BOM.)
	isBE := len(bb) >= 2 && bb[0] == 0xfe && bb[1] == 0xff
	isLE := len(bb) >= 2 && bb[0] == 0xff && bb[1] == 0xfe
	if isBE || isLE {
		// UTF-16: decode two-byte code units (skipping the BOM) with the
		// endianness the BOM announced, then re-encode as UTF-8.
		// BUG FIX: the previous code read the payload as UTF-8 runes with
		// Buffer.ReadRune and fed those to utf16.DecodeRune, which returns
		// RuneError for anything but a surrogate pair — so nearly every
		// UTF-16 character was silently dropped.
		payload := bb[2:]
		units := make([]uint16, 0, len(payload)/2)
		for i := 0; i+1 < len(payload); i += 2 {
			if isBE {
				units = append(units, uint16(payload[i])<<8|uint16(payload[i+1]))
			} else {
				units = append(units, uint16(payload[i+1])<<8|uint16(payload[i]))
			}
		}
		// An odd trailing byte is ignored; unpaired surrogates decode to
		// RuneError and are filtered out below.
		for _, r := range utf16.Decode(units) {
			if r != utf8.RuneError {
				obfr.WriteRune(r)
			}
		}
	} else {
		// UTF-8: copy rune by rune, dropping invalid sequences.
		ibfr := bytes.NewBuffer(bb)
		for {
			r, _, err := ibfr.ReadRune()
			if err != nil {
				break
			}
			if r != utf8.RuneError {
				obfr.WriteRune(r)
			}
		}
	}
	// Normalize to NFC so equivalent character sequences compare equal downstream.
	nbfr := bytes.NewBuffer(data.EmptyBytes)
	nwtr := norm.NFC.Writer(nbfr)
	nwtr.Write(obfr.Bytes())
	nwtr.Close()
	return nbfr.Bytes()
}

// Searches the bytes sequentially for text and tags and adds them to the tree where they are properly ordered.
// Each call consumes at most one leading token (a run of text or a single
// tag) from this.sourceBytes; Scrub() invokes it in a loop until no bytes
// are consumed.  Tag kinds are tried in a fixed priority order: doctype,
// conditional comment, plain comment, directive, processor instruction,
// CDATA, and finally ordinary html/xml tags.
func (this *Scrubber) scrubHTML() {
	var textbb []byte
	var tagbb, prefixbb, namebb, attributesbb, valuebb []byte
	var lengthTag int
	var index, index2 int
	var isEndTag, isClosed bool
	var currentNode *htmlsoup.SoupNode
	var bfr *bytes.Buffer
	currentNode = nil
	// Everything before the first '<' is plain text.
	index = bytes.Index(this.sourceBytes, data.TagStartBytes)
	if index > 0 {
		// there is text before '<'
		textbb = this.sourceBytes[:index]
		currentNode, this.currentParent = htmlsoup.AddNewTextNode(this.currentParent, textbb)
		this.sourceBytes = this.sourceBytes[len(textbb):]
	} else if index < 0 {
		// there is only text
		textbb = this.sourceBytes[:]
		currentNode, this.currentParent = htmlsoup.AddNewTextNode(this.currentParent, textbb)
		this.sourceBytes = this.sourceBytes[0:0]
	} else if index == 0 {
		// is this a doctype tag?
		// Try the upper-case delimiters first, then the lower-case form.
		tagbb = this.scrapeTag(data.DocTypeStartBytes, data.DocTypeEndBytes)
		lengthTag = len(tagbb)
		if lengthTag == 0 {
			tagbb = this.scrapeTag(data.DocTypeStartBytesLower, data.DocTypeEndBytes)
		}
		lengthTag = len(tagbb)
		if lengthTag > 0 {
			this.sourceBytes = this.sourceBytes[lengthTag:]
			// A doctype appearing after the html node has been seen is
			// consumed but not added to the tree.
			if this.htmlNode == nil {
				valuebb = parseDocTypeTag(tagbb)
				currentNode, this.currentParent = htmlsoup.AddNewDocTypeNode(this.currentParent, valuebb)
			}
		} else {
			// is this a ie browser instruction comment tag?
			// ConditionalCommentStartString
			// is this a comment tag?
			// Conditional comments must be checked before plain comments
			// because both start with the comment delimiter.
			tagbb = this.scrapeCommentTag(data.ConditionalCommentStartBytes, data.ConditionalCommentEndBytes)
			lengthTag = len(tagbb)
			if lengthTag > 0 {
				//
				this.sourceBytes = this.sourceBytes[lengthTag:]
				valuebb = parseConditionalCommentTag(tagbb)
				currentNode, this.currentParent = htmlsoup.AddNewConditionalCommentNode(this.currentParent, valuebb)
			} else {
				tagbb = this.scrapeCommentTag(data.CommentStartBytes, data.CommentEndBytes)
				lengthTag = len(tagbb)
				if lengthTag > 0 {
					//
					this.sourceBytes = this.sourceBytes[lengthTag:]
					valuebb = parseCommentTag(tagbb)
					currentNode, this.currentParent = htmlsoup.AddNewCommentNode(this.currentParent, valuebb)
				} else {
					// is this a directive tag?
					tagbb = this.scrapeTag(data.DirectiveStartBytes, data.DirectiveEndBytes)
					lengthTag = len(tagbb)
					if lengthTag > 0 {
						this.sourceBytes = this.sourceBytes[lengthTag:]
						namebb, attributesbb = parseDirectiveTag(tagbb)
						currentNode, this.currentParent = htmlsoup.AddNewDirectiveNode(this.currentParent, namebb, attributesbb)
					} else {
						// is this an instruction tag?
						tagbb = this.scrapeTag(data.ProcessorInstructionStartBytes, data.ProcessorInstructionEndBytes)
						lengthTag = len(tagbb)
						if lengthTag > 0 {
							this.sourceBytes = this.sourceBytes[lengthTag:]
							namebb, attributesbb = parseInstructionTag(tagbb)
							currentNode, this.currentParent = htmlsoup.AddNewInstructionNode(this.currentParent, namebb, attributesbb)
						} else {
							// is this a cdata tag?
							tagbb = this.scrapeCommentTag(data.CDataStartBytes, data.CDataEndBytes)
							lengthTag = len(tagbb)
							if lengthTag == 0 {
								tagbb = this.scrapeTag(bytes.ToLower(data.CDataStartBytes), bytes.ToLower(data.CDataEndBytes))
							}
							lengthTag = len(tagbb)
							if lengthTag > 0 {
								this.sourceBytes = this.sourceBytes[lengthTag:]
								valuebb = parseCdataTag(tagbb)
								currentNode, this.currentParent = htmlsoup.AddNewCdataNode(this.currentParent, valuebb)
							} else {
								// is this an html or xml tag?
								tagbb = this.scrapeTag(data.TagStartBytes, data.TagEndBytes)
								lengthTag = len(tagbb)
								if lengthTag > 0 {
									this.sourceBytes = this.sourceBytes[lengthTag:]
									prefixbb, namebb, attributesbb, isClosed, isEndTag = parseHTMLTag(tagbb)
									if isEndTag == false {
										if string(namebb) == string(htmlbytes) && this.htmlNode != nil {
											// html tag already added
											return
										}
										currentNode, this.currentParent = htmlsoup.OpenNewTagNode(this.currentParent, prefixbb, namebb, attributesbb, isClosed)
										// Register the node so GetNodeFromId can find it later.
										this.IDMap[currentNode.ID()] = currentNode
										// script/textarea bodies are raw text: capture everything
										// up to the matching end tag so it is not parsed as markup.
										// NOTE(review): only the exact lowercase and uppercase
										// spellings are matched; mixed case like "<Script>" falls
										// through — confirm intended.
										// NOTE(review): when no end tag exists (index2 < 0) the
										// raw content is left to be re-parsed as markup — confirm.
										bfr = bytes.NewBuffer(namebb)
										if bfr.String() == "script" {
											// find the end script tag
											// capture the text inside the start and end tags.
											bfr = bytes.NewBuffer(data.EmptyBytes)
											bfr.WriteString("</script")
											index2 = bytes.Index(this.sourceBytes, bfr.Bytes())
											if index2 > 0 {
												textbb = this.sourceBytes[:index2]
												currentNode, this.currentParent = htmlsoup.AddNewTextNode(this.currentParent, textbb)
												this.sourceBytes = this.sourceBytes[index2:]
											}
										} else if bfr.String() == "SCRIPT" {
											// find the end script tag
											// capture the text inside the start and end tags.
											bfr = bytes.NewBuffer(data.EmptyBytes)
											bfr.WriteString("</SCRIPT")
											index2 = bytes.Index(this.sourceBytes, bfr.Bytes())
											if index2 > 0 {
												textbb = this.sourceBytes[:index2]
												currentNode, this.currentParent = htmlsoup.AddNewTextNode(this.currentParent, textbb)
												this.sourceBytes = this.sourceBytes[index2:]
											}
										} else if bfr.String() == "textarea" {
											// find the end textarea tag
											// capture the text inside the start and end tags.
											bfr = bytes.NewBuffer(data.EmptyBytes)
											bfr.WriteString("</textarea")
											index2 = bytes.Index(this.sourceBytes, bfr.Bytes())
											if index2 > 0 {
												textbb = this.sourceBytes[:index2]
												currentNode, this.currentParent = htmlsoup.AddNewTextNode(this.currentParent, textbb)
												this.sourceBytes = this.sourceBytes[index2:]
											}
										} else if bfr.String() == "TEXTAREA" {
											// find the end textarea tag
											// capture the text inside the start and end tags.
											bfr = bytes.NewBuffer(data.EmptyBytes)
											bfr.WriteString("</TEXTAREA")
											index2 = bytes.Index(this.sourceBytes, bfr.Bytes())
											if index2 > 0 {
												textbb = this.sourceBytes[:index2]
												currentNode, this.currentParent = htmlsoup.AddNewTextNode(this.currentParent, textbb)
												this.sourceBytes = this.sourceBytes[index2:]
											}
										}
									} else {
										// An end tag: close the open node and move the
										// current parent back up the tree.
										this.currentParent = htmlsoup.CloseTagNode(this.currentParent, prefixbb, namebb)
									}
								}
							}
						}

					}
				}
			}
		}
	}
}

// Scrapes what is thought to be a tag for the start and end bytes.
// If it finds what looks like a tag it returns it.
// Else it returns an empty slice.
// The candidate must begin the input, must not end the input, and must not
// contain a nested '<' or an early '>' between its delimiters.
func (s *Scrubber) scrapeTag(startbb, endbb []byte) []byte {
	if !bytes.HasPrefix(s.sourceBytes, startbb) {
		return data.EmptyBytes
	}
	at := bytes.Index(s.sourceBytes, endbb)
	if at <= 0 || at >= len(s.sourceBytes)-len(endbb) {
		return data.EmptyBytes
	}
	candidate := s.sourceBytes[:at+len(endbb)]
	// Reject candidates with another '<' inside, or whose only '>' is not
	// the closing delimiter.
	if bytes.LastIndex(candidate, data.TagStartBytes) != 0 {
		return data.EmptyBytes
	}
	if bytes.Index(candidate, data.TagEndBytes) != len(candidate)-len(data.TagEndBytes) {
		return data.EmptyBytes
	}
	return candidate
}

// Scrapes what is thought to be a comment for the start and end bytes.
// If it finds what looks like a comment it returns it.
// Else it returns an empty slice.
// Unlike scrapeTag, no check is made for nested delimiters inside the span.
func (s *Scrubber) scrapeCommentTag(startbb, endbb []byte) []byte {
	if !bytes.HasPrefix(s.sourceBytes, startbb) {
		return data.EmptyBytes
	}
	at := bytes.Index(s.sourceBytes, endbb)
	if at <= 0 || at >= len(s.sourceBytes)-len(endbb) {
		return data.EmptyBytes
	}
	return s.sourceBytes[:at+len(endbb)]
}

// Parses what has been evalutated to be a doctype tag.
// Returns the bytes between the doctype start and end delimiters.
func parseDocTypeTag(tagbb []byte) (valuebb []byte) {
	valuebb = tagbb[len(data.DocTypeStartBytes) : len(tagbb)-len(data.DocTypeEndBytes)]
	return
}

// Parses what has been evalutated to be a processor instruction tag.
// The name is the token before the first white space; everything after it
// is returned as the attributes, trimmed of surrounding white space.
// Returns the parts parsed.
func parseInstructionTag(tagbb []byte) (namebb, attributesbb []byte) {
	// Strip the instruction start ("<?") and end ("?>") delimiters.
	body := tagbb[len(data.ProcessorInstructionStartBytes) : len(tagbb)-len(data.ProcessorInstructionEndBytes)]
	at := bytes.IndexAny(body, data.WhiteSpaceString)
	switch {
	case at < 0:
		// No white space: the whole body is the name.
		namebb = body
	case at > 0:
		namebb = body[:at]
	default:
		// Body starts with white space: there is no name.
		namebb = data.EmptyBytes
	}
	// BUG FIX: the previous code re-used absolute offsets into the original
	// tag slice after re-slicing it, which skipped the first bytes of the
	// attributes and sliced past the body into the end delimiter.
	// Trimming matches the attribute handling in parseHTMLTag.
	attributesbb = bytes.Trim(body[len(namebb):], data.WhiteSpaceString)
	return
}

// Parses what has been evalutated to be a directive tag.
// The name is the token before the first white space; everything after it
// is returned as the attributes, trimmed of surrounding white space.
// Returns the parts parsed.
func parseDirectiveTag(tagbb []byte) (namebb, attributesbb []byte) {
	// Strip the directive start and end delimiters.
	body := tagbb[len(data.DirectiveStartBytes) : len(tagbb)-len(data.DirectiveEndBytes)]
	// name is before first white space
	at := bytes.IndexAny(body, data.WhiteSpaceString)
	switch {
	case at < 0:
		// No white space: the whole body is the name.
		namebb = body
	case at > 0:
		namebb = body[:at]
	default:
		// Body starts with white space: there is no name.
		namebb = data.EmptyBytes
	}
	// BUG FIX: the previous code re-used absolute offsets into the original
	// tag slice after re-slicing it, which skipped the first bytes of the
	// attributes and sliced past the body into the end delimiter.
	// Trimming matches the attribute handling in parseHTMLTag.
	attributesbb = bytes.Trim(body[len(namebb):], data.WhiteSpaceString)
	return
}

// Parses what has been evalutated to be a conditional comment tag.
// Only the plain comment delimiter lengths ("<!--" / "-->") are stripped,
// not the conditional markers; presumably the "[if ...]" condition is meant
// to remain in the value — TODO confirm against AddNewConditionalCommentNode.
func parseConditionalCommentTag(tagbb []byte) (valuebb []byte) {
	valuebb = tagbb[len(data.CommentStartBytes) : len(tagbb)-len(data.CommentEndBytes)]
	return
}

// Parses what has been evalutated to be a comment tag.
// Returns the bytes between the comment delimiters, or an empty slice when
// the delimiters do not leave a valid interior span.
func parseCommentTag(tagbb []byte) []byte {
	lo := len(data.CommentStartBytes)
	hi := len(tagbb) - len(data.CommentEndBytes)
	if lo <= 0 || hi <= lo {
		return data.EmptyBytes
	}
	return tagbb[lo:hi]
}

// Parses what has been evalutated to be a cdata tag.
// Returns the bytes between the CDATA start and end delimiters.
func parseCdataTag(tagbb []byte) (valuebb []byte) {
	valuebb = tagbb[len(data.CDataStartBytes) : len(tagbb)-len(data.CDataEndBytes)]
	return
}

// Parses what has been evalutated to be an html tag.
// Returns the namespace prefix and name (split on the first ':'), the
// attribute bytes trimmed of surrounding white space, whether the tag is
// self-closing ("<.../>"), and whether it is an end tag ("</...>").
func parseHTMLTag(tagbb []byte) (prefixbb, namebb, attributesbb []byte, isClosed, isEndTag bool) {
	slash := len(data.TagSlashBytes)
	start := len(data.TagStartBytes)
	end := len(tagbb) - len(data.TagEndBytes)
	// "</..." marks an end tag.
	isEndTag = bytes.Index(tagbb, data.TagSlashBytes) == start
	if isEndTag {
		start += slash
	}
	// ".../>" marks a tag that closes itself.
	isClosed = bytes.LastIndex(tagbb, data.TagSlashBytes) == end-slash
	if isClosed {
		end -= slash
	}
	inner := tagbb[start:end]
	// splitName separates an optional namespace prefix from the tag name.
	splitName := func(qname []byte) ([]byte, []byte) {
		pieces := bytes.Split(qname, data.ColonBytes)
		if len(pieces) == 1 {
			return data.EmptyBytes, pieces[0]
		}
		return pieces[0], pieces[1]
	}
	// The name ends at the first white space; the remainder is attributes.
	at := bytes.IndexAny(inner, data.WhiteSpaceString)
	switch {
	case at < 0:
		prefixbb, namebb = splitName(inner)
		attributesbb = data.EmptyBytes
	case at > 0:
		prefixbb, namebb = splitName(inner[:at])
		attributesbb = bytes.Trim(inner[at:], data.WhiteSpaceString)
	default:
		// Tag begins with white space: no usable name.
		prefixbb, namebb, attributesbb = data.EmptyBytes, data.EmptyBytes, data.EmptyBytes
	}
	return
}
