import csv
import ngram
import MMC
import random
import json

def divide_dataset():
    """Shuffle the node-pair dataset and split it into train/test CSVs.

    Reads ./ngram_statistics/size_100/3MMC/node_pair.csv, shuffles all
    rows, then writes:
      - test10k.csv        : first 10,000 rows (held-out test set)
      - trainN000k.csv     : nested training sets of 1M..5M rows, each
                             starting right after the test rows (so
                             train1000k is a prefix of train2000k, etc.)
      - shuffle_pair.csv   : the full shuffled dataset, so the exact
                             split can be reproduced later.
    """
    base = './ngram_statistics/size_100/3MMC/'
    test_size = 10000
    step = 1000000  # each successive training set grows by 1M rows

    with open(base + 'node_pair.csv', "r") as f:
        trajectories = list(csv.reader(f))
    random.shuffle(trajectories)

    def _write_csv(name, rows):
        # Write `rows` to a CSV file inside the dataset directory.
        with open(base + name, 'w', newline='') as csvfile:
            csv.writer(csvfile).writerows(rows)

    _write_csv('test10k.csv', trajectories[:test_size])

    # Nested, test-disjoint training sets: train1000k ... train5000k.
    for i in range(1, 6):
        _write_csv('train%d000k.csv' % i,
                   trajectories[test_size:test_size + i * step])

    _write_csv('shuffle_pair.csv', trajectories)

def predict():
    """Evaluate the bigram (2-MMC) model on the 10k test set.

    Loads the transition statistics trained on 4M pairs and, for each
    (curNode, nextNode) test row, predicts the most probable successor
    of curNode.  A node absent from the model counts as a miss.

    Returns:
        float: prediction accuracy in [0, 1].
    """
    with open('./ngram_statistics/size_100/2MMC/bigram_4000k.json') as bf:
        prob_matrix = json.load(bf)

    correct = 0
    total = 0
    testfile = './ngram_statistics/size_100/2MMC/test10k.csv'
    with open(testfile, "r") as tf:
        for row in csv.reader(tf):
            total += 1
            cur_node, next_node = row[0], row[1]
            successors = prob_matrix.get(cur_node)
            if successors is None:
                continue  # unseen node: counted as an incorrect prediction
            # 'CNT' is the node's raw visit count, not a transition entry,
            # so it must be excluded from the argmax.  Ties resolve to the
            # first key encountered, matching the original scan order.
            pred_key = max((k for k in successors if k != 'CNT'),
                           key=successors.__getitem__, default='')
            if pred_key == next_node:
                correct += 1

    # Divide by the actual number of test rows rather than assuming
    # exactly 10000 (robust to a truncated or regenerated test file).
    return correct / total if total else 0.0


def predict_3gram():
    """Evaluate the trigram (3-MMC) model on the 10k test set.

    Loads the trigram statistics trained on 4M triples and, for each
    (prev, cur, nextNode) test row, looks up the context key
    'prev_cur' and predicts its most probable successor.  A context
    absent from the model counts as a miss.

    Returns:
        float: prediction accuracy in [0, 1].
    """
    with open('./ngram_statistics/size_100/3MMC/trigram_4000k.json') as bf:
        prob_matrix = json.load(bf)

    correct = 0
    total = 0
    testfile = './ngram_statistics/size_100/3MMC/test10k.csv'
    with open(testfile, "r") as tf:
        for row in csv.reader(tf):
            total += 1
            prev_node, cur_node, next_node = row[0], row[1], row[2]
            context = prev_node + '_' + cur_node  # trigram context key
            successors = prob_matrix.get(context)
            if successors is None:
                continue  # unseen context: counted as an incorrect prediction
            # 'CNT' is the context's raw visit count, not a transition
            # entry.  Ties resolve to the first key encountered, matching
            # the original scan order.
            pred_key = max((k for k in successors if k != 'CNT'),
                           key=successors.__getitem__, default='')
            if pred_key == next_node:
                correct += 1

    # Divide by the actual number of test rows rather than assuming
    # exactly 10000 (robust to a truncated or regenerated test file).
    return correct / total if total else 0.0

if __name__ == '__main__':
    # Pipeline steps, run one at a time (uncomment as needed):
    # 1) retrain and evaluate the bigram model:
    # ngram.node_count_train()
    # ngram.bigram()
    # print(predict())

    # 2) regenerate the shuffled train/test split CSVs:
    #divide_dataset()
    

    # 3) evaluate the trigram model on the held-out test set.
    print(predict_3gram())
