package main

import (
	"fmt"
)

// defaultProb is the tiny non-zero probability that a word
// we have not seen before appears in the class. Returning a
// non-zero value keeps one unseen word from zeroing out the
// whole product of word probabilities in getWordsProb.
const defaultProb = 0.00000000001

// Class is the label of a classification category (e.g. "Good", "Bad").
type Class string

// Classifier is a naive Bayes classifier over a fixed set of classes.
type Classifier struct {
	Classes []Class
	learned int // number of documents passed to Learn
	seen    int // number of documents scored (incremented by scoring methods)
	datas   map[Class]*classData
}

// classData holds the word statistics accumulated for a single class.
type classData struct {
	Freqs map[string]int // word -> number of occurrences within this class
	Total int            // total word count seen for this class
}

// NewClassifier builds a classifier over the given classes.
// At least two distinct classes must be provided; otherwise
// this function panics.
func NewClassifier(classes ...Class) (c *Classifier) {
	if len(classes) < 2 {
		panic("provide at least two classes")
	}

	// Reject duplicates by counting the distinct class names.
	distinct := make(map[Class]bool, len(classes))
	for _, cl := range classes {
		distinct[cl] = true
	}
	if len(distinct) != len(classes) {
		panic("classes must be unique")
	}

	// Allocate an empty frequency table for every class up front.
	c = &Classifier{
		Classes: classes,
		datas:   make(map[Class]*classData, len(classes)),
	}
	for _, cl := range classes {
		c.datas[cl] = newClassData()
	}
	return c
}

// newClassData returns an empty per-class statistics record.
func newClassData() *classData {
	d := &classData{}
	d.Freqs = map[string]int{}
	return d
}

// Learn records one training document for the given class: each word's
// frequency count is incremented in that class's table, the class word
// total grows by the document length, and the classifier's learned
// document counter is incremented. It panics if which is not one of
// the classes the classifier was constructed with, matching the
// panic-on-misuse convention of NewClassifier.
func (c *Classifier) Learn(document []string, which Class) {
	data, ok := c.datas[which]
	if !ok {
		// Without this guard an unknown class yields a nil *classData
		// and the loop below panics with an opaque nil-pointer error.
		panic("Learn: unknown class " + string(which))
	}
	for _, word := range document {
		data.Freqs[word]++
		data.Total++
	}
	c.learned++
}

// getPriors returns the prior probability P(C_j) of each class,
// in the same order as c.Classes. Each prior is the class's word
// total divided by the grand total across all classes; if no words
// have been learned yet, all priors are zero.
//
// TODO: There is a way to smooth priors, currently
// not implemented here.
func (c *Classifier) getPriors() (priors []float64) {
	priors = make([]float64, len(c.Classes))
	grandTotal := 0
	for i, class := range c.Classes {
		count := c.datas[class].Total
		priors[i] = float64(count)
		grandTotal += count
	}
	if grandTotal != 0 {
		for i := range priors {
			priors[i] /= float64(grandTotal)
		}
	}
	return priors
}

// getWordsProb returns P(D|C_j) -- the probability of seeing
// this set of words in a document of this class, computed as the
// product of the individual word probabilities.
//
// Note that words should not be empty, and this method of
// calculation is prone to underflow if there are many words
// and their individual probabilities are small.
func (d *classData) getWordsProb(words []string) (prob float64) {
	prob = 1.0
	for i := range words {
		prob *= d.getWordProb(words[i])
	}
	return prob
}

// getWordProb returns P(W|C_j) -- the probability of seeing
// a particular word W in a document of this class. Words never
// seen in this class get the tiny defaultProb instead of zero.
func (d *classData) getWordProb(word string) float64 {
	if count, ok := d.Freqs[word]; ok {
		return float64(count) / float64(d.Total)
	}
	return defaultProb
}

// ProbScores works the same as LogScores, but delivers
// actual probabilities as discussed above. Note that float64
// underflow is possible if the word list contains too
// many words that have probabilities very close to 0.
//
// Notes on underflow: underflow is going to occur when you're
// trying to assess large numbers of words that you have
// never seen before. Depending on the application, this
// may or may not be a concern. Consider using SafeProbScores()
// instead.
// func (c *Classifier) ProbScores(doc []string) (scores []float64, inx int, strict bool) {
// 	n := len(c.Classes)
// 	scores = make([]float64, n, n)
// 	priors := c.getPriors()
// 	sum := float64(0)
// 	// calculate the score for each class
// 	for index, class := range c.Classes {
// 		data := c.datas[class]
// 		// c is the sum of the logarithms
// 		// as outlined in the refresher
// 		score := priors[index]
// 		for _, word := range doc {
// 			score *= data.getWordProb(word)
// 		}
// 		scores[index] = score
// 		sum += score
// 	}
// 	for i := 0; i < n; i++ {
// 		scores[i] /= sum
// 	}
// 	inx, strict = findMax(scores)
// 	c.seen++
// 	return scores, inx, strict
// }

// Demo classes used by main.
const (
	Good Class = "Good"
	Bad  Class = "Bad"
)

// main demonstrates the classifier: it trains on one "Good" and one
// "Bad" document, prints the class priors, then scores a test document
// against each class (prior times the product of per-word probabilities).
func main() {
	c := NewClassifier(Good, Bad)
	goodStuff := []string{"tall", "rich", "handsome", "tall", "sexy"}
	badStuff := []string{"ugly", "smell", "poor"}
	c.Learn(goodStuff, Good)
	c.Learn(badStuff, Bad)

	fmt.Println(c.datas[Good])
	fmt.Println(c.datas[Bad])

	// Prior probabilities of the classes: 8 words were learned in
	// total, 5 in the first class and 3 in the second.
	priors := c.getPriors()
	fmt.Println(priors)

	n := len(c.Classes)
	doc := []string{"tall", "man", "smelly", "rich"}
	scores := make([]float64, n)
	sum := float64(0)
	// Calculate the score for each class.
	for index, class := range c.Classes {
		data := c.datas[class]
		score := priors[index]
		fmt.Println("before getWordProb :", score, data)
		for _, word := range doc {
			// Compute the word probability once per word (the original
			// called getWordProb twice) and fix the "getWorkProb" typo
			// in the trace message.
			p := data.getWordProb(word)
			score *= p
			fmt.Println("getWordProb: ", word, score, p)
		}
		scores[index] = score
		sum += score
	}
	fmt.Println(scores)
	fmt.Println(sum)
}
