#!/usr/bin/python3
import porter
import string
import json
import os
import math
import sys, getopt

p = porter.PorterStemmer()


# Stem a word and return it upper-cased, memoising results in the
# module-level `cache` dict (stopword filtering happens at the callers).
def newstem(word: str) -> str:
    try:
        return cache[word]
    except KeyError:
        stemmed = p.stem(word.lower()).upper()
        cache[word] = stemmed
        return stemmed


# Record one occurrence of `word` in both the per-document term counts
# and the collection-wide ("all") counts. Title words count double.
def frequency(word: str, doc, istitle):
    weight = 2 if istitle else 1
    for bucket in (doc_frequency[doc], doc_frequency["all"]):
        bucket[word] = bucket.get(word, 0) + weight


# Parse lisa/lisa.all.txt and fill the global doc_frequency index:
# per-document term counts, per-document lengths ("length"), and the
# collection stats ("others": average length and document count).
def lisa():
    i = 1
    length = 0
    avg_len = 0   # running total of token counts; divided by count at the end
    count = 0     # number of documents seen
    istitle = True
    doc_frequency[1] = {}
    # fix: use a context manager so the file is closed even if parsing raises
    with open("lisa/lisa.all.txt", "r") as lisa_file:
        for line in lisa_file:
            # drop the trailing newline
            line = line[:-1]
            # a separator row marks the start of the next document's title
            if line.find("**********") == 0:
                istitle = True
                continue
            # a blank line separates the title from the body
            if not len(line):
                istitle = False
                continue
            # "Document N" header: remember N and initialise its entries
            if line.find("Document ") == 0:
                i = int(line.split()[1])
                count += 1
                doc_frequency[i] = {}
                doc_frequency["length"][i] = 0
                continue
            # strip punctuation, then tokenise
            line = line.translate(str.maketrans('', '', string.punctuation))
            words = line.split()
            length = 0
            for word in words:
                # skip stopwords; stem the rest and record the occurrence
                if word.lower() not in stopwords:
                    length += 1
                    word = newstem(word)
                    frequency(word, i, istitle)
            doc_frequency["length"][i] += length
            avg_len += length
    avg_len = avg_len / count
    doc_frequency["others"] = {"avg_len": avg_len, "count": count}


# Persist the in-memory frequency dictionary to index.json so later runs
# can skip re-parsing the collection.
def index():
    global doc_frequency
    with open('index.json', 'w') as out:
        json.dump(doc_frequency, out, indent=2)


# Score every document in the global index `idx` against the query terms
# and return (doc_id, score) pairs sorted by descending score.
def BM25_main(words: list):
    avg_len = idx["others"]["avg_len"]
    count = idx["others"]["count"]
    # IDF-style weight per query term; non-positive weights are dropped,
    # which treats such terms like stopwords.
    # NOTE(review): the weight is only ever used as a filter below - it is
    # never multiplied into the document score. Confirm this is intended.
    log = {}
    for word in words:
        df = counter(word)
        weight = math.log((count - df + 0.5) / (df + 0.5))
        if weight > 0:
            log[word] = weight
    bm25 = {}
    for doc in idx:
        # the index dict also holds bookkeeping entries - skip them
        if doc in ("all", "others", "length"):
            continue
        length = idx["length"][doc]
        score = 0.0
        for word in log:
            # terms absent from this document contribute nothing
            if word in idx[doc]:
                fij = idx[doc][word]
                score += (fij * 2) / (fij + 0.25 + (0.75 * length / avg_len))
        bm25[doc] = score
    # highest score first
    return sorted(bm25.items(), key=lambda e: e[1], reverse=True)


# Document frequency: how many documents' term dicts contain `word`
# (the bookkeeping keys of `idx` are excluded).
def counter(word: str):
    special = ("all", "others", "length")
    return sum(1 for doc in idx
               if doc not in special and word in idx[doc])


# Interactive mode (Part 1): read one query from stdin and print the
# top-15 BM25 results as "rank doc-id score".
def manual():
    user_input = input("Please input the query: ")
    # strip punctuation before echoing and tokenising
    user_input = user_input.translate(str.maketrans('', '', string.punctuation))
    print("Results for query [", user_input, "]")
    # keep only non-stopwords, stemmed and upper-cased
    words = [newstem(token).upper()
             for token in user_input.split()
             if token.lower() not in stopwords]
    results = BM25_main(words)
    # show at most the first 15 ranked documents
    for rank, (key, value) in enumerate(results, start=1):
        if rank == 16:
            break
        print('{0:4d} {1:4s} {2:4f}'.format(rank, str(key), value))


# Evaluation mode: load queries and relevance judgements, run BM25 for
# every query (keeping its top-15 results), then compute all metrics.
def evaluation():
    global queries, relevance, bm25_queries, evaluation_results
    queries = {}
    read_queries()
    relevance = {}
    read_relevance()
    bm25_queries = {}
    for qid in queries:
        bm25_queries[qid] = BM25_main(queries[qid])[:15]
    evaluation_results = {"Precision": 0, "Recall": 0, "P@10": 0, "R-precision": 0, "MAP": 0}
    evaluations()


# Run every evaluation metric in turn (precision() also fills Recall),
# then print the collected results.
def evaluations():
    for metric in (precision, p10, r_precision, MAP):
        metric()
    print(evaluation_results)


# Precision and recall over each query's top-15 results, averaged across
# queries; results are written into evaluation_results. (Locals renamed:
# the old `p` shadowed the module-level Porter stemmer.)
def precision():
    precisions = []
    recalls = []
    retrieved = 15
    for qid in relevance:
        relevant = len(relevance[qid])
        hits = 0
        # count judged-relevant docs that appear in the retrieved set
        # (retrieved doc ids are strings, judgements are ints)
        for judged in relevance[qid]:
            for bm in bm25_queries[qid]:
                if str(judged) == bm[0]:
                    hits += 1
        precisions.append(hits / retrieved)
        recalls.append(hits / relevant)
    evaluation_results["Precision"] = sum(precisions) / len(precisions)
    evaluation_results["Recall"] = sum(recalls) / len(recalls)


# Precision at rank 10 (P@10), averaged across all queries.
# (Removed the unused local `c` from the original.)
def p10():
    scores = []
    for qid in relevance:
        hits = 0
        # count judged-relevant docs among the first 10 retrieved
        for judged in relevance[qid]:
            for bm in bm25_queries[qid][:10]:
                if str(judged) == bm[0]:
                    hits += 1
        scores.append(hits / 10)
    evaluation_results["P@10"] = sum(scores) / len(scores)


# R-precision-style metric: walk each query's top-15 ranking counting
# relevant hits, stopping early once 40% of the relevant set is found;
# the hit count is divided by the fixed cutoff of 15.
# Fixes: the hit counter (`relret`) was never reset between queries,
# inflating every query's score after the first (every sibling metric
# resets it per query); also removed unused locals `should_break`, `c`.
def r_precision():
    scores = []
    retrieved = 15
    for qid in relevance:
        relevant = len(relevance[qid])
        hits = 0  # reset per query (bug fix)
        for bm in bm25_queries[qid][:15]:
            if int(bm[0]) in relevance[qid]:
                hits += 1
            # NOTE(review): stopping at 40% recall is unusual for
            # R-precision - confirm this cutoff is intended
            if (hits / relevant) >= 0.4:
                break
        scores.append(hits / retrieved)
    evaluation_results["R-precision"] = sum(scores) / len(scores)


# Mean Average Precision over each query's top-15 results.
# (Removed the leftover debug `print(p)` - no other metric prints its
# intermediate per-query list.)
def MAP():
    averages = []
    for qid in relevance:
        relevant = len(relevance[qid])
        hits = 0   # relevant docs seen so far in the ranking
        seen = 0   # documents examined so far
        total = 0  # running sum of precision-at-each-hit
        for bm in bm25_queries[qid][:15]:
            seen += 1
            if int(bm[0]) in relevance[qid]:
                hits += 1
                total += (hits / seen)
        # average precision for this query, normalised by all relevant docs
        averages.append(total / relevant)
    evaluation_results["MAP"] = sum(averages) / len(averages)

# Parse lisa/lisa.queries.txt into the global `queries` dict:
# query-id -> list of stemmed, stopword-filtered terms.
# Each query is a header line (id first token) followed by text lines,
# terminated by a line ending in "#".
def read_queries():
    print("Reading lisa.queries.txt ......")
    with open("lisa/lisa.queries.txt", "r") as qs:
        # flag is True while we expect the header line of the next query
        flag = True
        no = ""
        for line in qs:
            # drop the trailing newline
            line = line[:-1]
            # header line: first token is the query id
            if flag:
                no = line.split()[0]
                queries[no] = []
                flag = False
                continue
            # a trailing "#" terminates the current query
            # fix: endswith() is safe on an empty line, where the original
            # line[-1] raised IndexError
            if line.endswith("#"):
                flag = True
            # strip punctuation, tokenise, drop stopwords, stem
            line = line.translate(str.maketrans('', '', string.punctuation))
            words = line.split()
            for word in words:
                if word.lower() not in stopwords:
                    word = newstem(word)
                    queries[no].append(word)


# Parse lisa/lisa.relevance.txt into the global `relevance` dict:
# query-id (str) -> list of relevant document ids (int).
# Whitespace-separated layout per query: <query id> <count> <doc id>*count.
def read_relevance():
    print("Reading lisa.relevance.txt ......")
    with open("lisa/lisa.relevance.txt", "r") as rel_file:
        tokens = rel_file.read().split()
    # remaining: doc ids still expected for the current query
    # (0 = next token is a query id, -1 = next token is the doc count)
    remaining = 0
    qid = ""
    for token in tokens:
        value = int(token)
        if remaining == 0:
            # start of a new query's judgements
            qid = str(value)
            relevance[qid] = []
            remaining = -1
        elif remaining == -1:
            # this token is how many doc ids follow
            remaining = value
        else:
            remaining -= 1
            relevance[qid].append(value)


# main function
if __name__ == "__main__":
    # parse command line: -h for help, -m <manual|evaluation> for the mode
    argv = sys.argv[1:]
    mode = ""
    try:
        opts, args = getopt.getopt(argv, "hm:")
    except getopt.GetoptError:
        print('test.py -m <manual,evaluation>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('test.py -m <manual,evaluation>')
            sys.exit()
        elif opt == "-m":
            mode = arg
    # The Index Dictionary (loaded from index.json below)
    idx = {}
    print("Check is index file existed")
    # read the stopword list first - newstem/lisa/read_queries depend on it
    # (fix: context manager instead of manual open/close)
    with open("stopwords.txt", "r") as file:
        stopwords = file.read().splitlines()
    # A cache of the stemming results
    cache = {}
    if not os.path.exists("index.json"):
        # Friendly words
        print("Not found the index file")
        print("Creating an Index file....")
        # in-memory index: per-doc term counts plus bookkeeping keys
        doc_frequency = {}
        doc_frequency["all"] = {}     # collection-wide term counts
        doc_frequency["length"] = {}  # per-document lengths
        # Run main function
        lisa()
        index()
        print("Creat Index file done")
    else:
        print("Reading existed index file......")
    # Read the (possibly just created) index file
    with open("index.json", "r") as i:
        idx = json.load(i)

    # fix: the mode dispatch was commented out, making the parsed -m
    # option a no-op and unconditionally running evaluation()
    if mode.lower() == "manual":
        manual()
    elif mode.lower() == "evaluation":
        evaluation()
    else:
        print('test.py -m <manual,evaluation>')
        sys.exit(2)
