#!/usr/bin/env python3

class NGram():
    def __init__(self, text, n=3):
        self.length = None
        self.n = n
        self.table = {}
        self.parseText(text)
        self.calculateLength()

    def parseText(self, text):
        chars = " " * self.n

        for letter in (" ".join(text.split()) + " "):
            chars = chars[1:] + letter
            self.table[chars] = self.table.get(chars, 0) + 1

    def calculateLength(self):
        self.length = sum([x * x for x in self.table.values()]) **  0.5
        return self.length

    def __sub__(self, other):
        if not isinstance(other, NGram):
            raise TypeError("Can't compare NGram with not-NGram object")

        if self.n != other.n:
            raise TypeError("Can't compare NGram object of different size")

        total = 0
        for k in self.table:
            total += self.table[k] * other.table.get(k, 0)

        return 1.0 - (float(total)) / (float(self.length) * float(other.length))

    def bestMath(self, languages):
        return min(languages, key=lambda n:self - NGram(n))


if __name__ == "__main__":
    # Reference samples: one English sentence and one Chinese sentence.
    sampleEN = u"But in reality, the two features reduce down to a single feature: sequences of characters. The frequency of individual characters is really just a frequency of a sequence of length 1."
    sampleZH = u"这只针对单独的一页进行排序，而没有对全部页面的电影进行排序 "
    # A longer Chinese query whose profile should land nearest the Chinese sample.
    queryZH = u"这只针对单独的一页进行排序，而没有对全部页面的电影进行排序在匹配电影名字时是否可以同时匹配链接与名字、评分、评论组成数组 "

    candidates = [sampleEN, sampleZH]
    closest = NGram(queryZH).bestMath(candidates)
    print(closest)