import math


class Tokenize:
    """Whitespace tokenizer: lowercases words, strips non-letter characters,
    and removes stop words."""

    def __init__(self, stopWords):
        # Tokens to drop from every tokenization result.
        self.stopWords = stopWords

    def getTokens(self, sentence):
        """Split *sentence* on single spaces and return the cleaned tokens
        that are not stop words."""
        cleaned = []
        for raw in sentence.split(" "):
            # Keep only ASCII lowercase letters of the lowercased word.
            cleaned.append("".join(ch for ch in raw.lower() if 'a' <= ch <= 'z'))
        return [word for word in cleaned if word not in self.stopWords]

class TFIDF:
    """TF-IDF vectorizer over a document collection with cosine-similarity search.

    NOTE(review): the TF and IDF formulas below are nonstandard — each is the
    log10 of a frequency < 1 (so each factor is negative, and their product is
    >= 0), and IDF counts token occurrences rather than document frequency.
    They are preserved as-is to keep the ranking behavior unchanged.
    """

    def __init__(self, docs, tokenizeTool: "Tokenize"):
        """Store the raw documents and pre-tokenize them with *tokenizeTool*.

        BUG FIX: the original body referenced the module-level global
        ``tokenize`` instead of the ``tokenizeTool`` parameter, so any
        tokenizer passed in was silently ignored (and construction raised
        NameError when no such global existed).
        """
        self.docs = docs
        self.tokenizeTool = tokenizeTool
        self.docsFormat = [tokenizeTool.getTokens(doc) for doc in docs]

    @staticmethod
    def getTF(doc, word):
        """Return the log-scaled term frequency of *word* in tokenized *doc*.

        Returns 0 when the word is absent; otherwise
        log10(count / (len(doc) + 1)), which is always <= 0.
        """
        cnt = doc.count(word)  # idiomatic replacement of the manual counting loop
        return math.log10(cnt / (len(doc) + 1)) if cnt > 0 else 0

    @staticmethod
    def cosSimilarity(docVector, queryVector):
        """Cosine similarity between two sparse ``{word: weight}`` vectors.

        Returns 0 when either vector has zero norm (e.g. is empty).
        """
        queryNorm = 0
        dotProduct = 0
        for word, weight in queryVector.items():
            queryNorm += weight ** 2
            if word in docVector:
                dotProduct += weight * docVector[word]
        docNorm = sum(weight ** 2 for weight in docVector.values())
        if docNorm == 0 or queryNorm == 0:
            return 0
        return dotProduct / (math.sqrt(queryNorm) * math.sqrt(docNorm))

    def addDoc(self, doc):
        """Append a raw document and its tokenized form to the collection."""
        self.docs.append(doc)
        self.docsFormat.append(self.tokenizeTool.getTokens(doc))

    def getTFByIdx(self, idx, word):
        """Term frequency of *word* in the document at index *idx*."""
        return self.getTF(self.docsFormat[idx], word)

    def getIDF(self, word):
        """Return the log-scaled inverse document frequency of *word*.

        NOTE(review): counts total token occurrences (not documents
        containing the word) and starts ``total`` at 1; kept as-is to
        preserve ranking behavior.
        """
        total = 1
        cnt = 0
        for doc in self.docsFormat:
            for w in doc:
                if w == word:
                    cnt += 1
                total += 1
        return math.log10(cnt / total) if cnt > 0 else 0

    def getDocVectors(self):
        """Build one sparse ``{word: tf*idf}`` vector per document."""
        docVectors = []
        for idx, doc in enumerate(self.docsFormat):
            vector = {}
            for w in doc:
                vector[w] = self.getIDF(w) * self.getTFByIdx(idx, w)
            docVectors.append(vector)
        return docVectors

    def query(self, queryWords):
        """Rank every document against the query string *queryWords*.

        Returns an iterator of ``(original document, similarity)`` pairs
        sorted by similarity, highest first.
        """
        # TF-IDF vectors for the current documents.
        docVectors = self.getDocVectors()

        # TF-IDF vector for the query string itself.
        tokenizedQueryWords = self.tokenizeTool.getTokens(queryWords)
        queryVector = {}
        for word in tokenizedQueryWords:
            tf = self.getTF(tokenizedQueryWords, word)
            idf = self.getIDF(word)
            queryVector[word] = tf * idf
        results = []
        for idx, vector in enumerate(docVectors):
            results.append((idx, self.cosSimilarity(vector, queryVector)))
        return map(
            lambda item: (self.docs[item[0]], item[1]),
            sorted(results, key=lambda item: item[1], reverse=True),
        )


# Sample document collection used by the demo in the __main__ block below.
documents = [
    "The sky is blue.",
    "The sun is bright.",
    "The sun in the sky is bright.",
    "We can see the shining sun, the bright sun."
]

if __name__ == '__main__':
    # Demo: index the sample documents, then print every document ranked
    # against a four-word query.
    tokenize = Tokenize(["the", "is", "and"])
    tfidfTool = TFIDF(documents, tokenize)
    ranked = tfidfTool.query("blue sky bright shining")
    print(list(ranked))