import json
import math
import os
from collections import Counter, defaultdict

from tqdm import tqdm
from transformers import BertTokenizer


def get_postings(tokenizer):
    """Build (or load from cache) an inverted index over ``all_news.jsonl``.

    Each document's ``content`` field is tokenized with *tokenizer*, term
    frequencies are weighted sub-linearly (1 + log tf) and length-normalized,
    and the result is stored as ``postings[term][doc_id] = weight``.

    Args:
        tokenizer: any object exposing ``tokenize(str) -> list[str]``
            (a ``BertTokenizer`` in this script).

    Returns:
        Mapping of term -> {doc_id: normalized tf weight}.

    NOTE(review): when served from the ``postings.json`` cache, JSON
    round-tripping turns the integer doc ids into strings, so cached and
    freshly built indexes differ in key type — confirm callers don't care.
    """
    # Serve the cached index if a previous run already built it.
    if os.path.exists("postings.json"):
        with open("postings.json", "r", encoding='utf-8') as f:
            return json.load(f)

    with open("all_news.jsonl", "r", encoding='utf-8') as f:
        lines = f.readlines()

    postings = defaultdict(dict)
    for idx, line in enumerate(tqdm(lines)):
        tokens = tokenizer.tokenize(json.loads(line.strip())["content"])
        if not tokens:
            # An empty document used to fall through to 1.0 / sqrt(0) and
            # raise ZeroDivisionError; skip it instead.
            print(f"skipping empty document at index {idx}")
            continue

        # Sub-linear tf weighting: 1 + log(raw count) per distinct term.
        weights = {te: math.log(cnt) + 1
                   for te, cnt in Counter(tokens).items()}

        # NOTE(review): this normalizes by 1/sqrt(sum of weights) rather
        # than the usual cosine 1/sqrt(sum of SQUARED weights) — kept
        # unchanged to preserve the scores downstream code expects.
        nor = 1.0 / math.sqrt(sum(weights.values()))
        for te, w in weights.items():
            postings[te][idx] = w * nor

    # Cache the index so subsequent runs skip the build.
    with open("postings.json", "w", encoding='utf-8') as f:
        json.dump(postings, f)

    return postings  # term -> {doc_id: normalized tf weight}


def tfidf_score_rank(query, k=10, index=None, doc_nums=72838):
    """Rank documents by TF-IDF similarity to *query*.

    Args:
        query: iterable of query tokens (duplicates allowed; they are
            folded into the query-side tf weight, not counted twice).
        k: number of top-scoring documents to return.
        index: inverted index mapping term -> {doc_id: normalized tf
            weight}; defaults to the module-level ``postings`` built by
            ``get_postings`` (the original hard-coded behavior).
        doc_nums: total number of documents in the collection, used for
            the idf term (default preserves the original constant).

    Returns:
        Up to *k* ``(doc_id, score)`` pairs, highest score first.
    """
    if index is None:
        index = postings  # module-level index built under __main__

    # Query-side weight per distinct term: (1 + log tf) * idf.
    t_weight = {}
    for te, cnt in Counter(query).items():
        if te in index:
            # Document frequency: how many documents contain this term.
            d_fre = len(index[te])
        else:
            # Unseen term -> idf = log(doc_nums / doc_nums) = 0, no effect.
            d_fre = doc_nums
        t_weight[te] = (math.log(cnt) + 1) * math.log(doc_nums / d_fre)

    # Accumulate dot products over the posting lists. Iterating the
    # *distinct* terms fixes the original double counting of duplicated
    # query tokens (their multiplicity is already in the tf factor above).
    score_tid = defaultdict(float)
    for te, qw in t_weight.items():
        for tid, dw in index.get(te, {}).items():
            score_tid[tid] += dw * qw  # tf*idf contribution

    similarity = sorted(score_tid.items(), key=lambda x: x[1], reverse=True)
    return similarity[:k]


if __name__ == '__main__':
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # Deliberately bound at module level: tfidf_score_rank reads the
    # global `postings` when ranking.
    postings = get_postings(tokenizer)

    sample = "Sixers start to sweat with Raptors revival in full swing"
    # Deduplicate the query tokens before ranking.
    query = list(set(tokenizer.tokenize(sample)))
    print(tfidf_score_rank(query))
