// search.cpp — a miniature search engine (CS106B "Search" assignment).
// Builds an inverted index (token -> set of URLs) from a database file
// of alternating URL/body lines, then answers compound queries with
// union (bare term), intersection ('+' prefix), and difference ('-' prefix).
#include <cctype>
#include <fstream>
#include <iostream>
#include <string>
#include "error.h"
#include "filelib.h"
#include "map.h"
#include "search.h"
#include "set.h"
#include "simpio.h"
#include "strlib.h"
#include "vector.h"
#include "testing/SimpleTest.h"
using namespace std;

// Returns true if str contains at least one alphabetic character.
// Fixes vs. original: takes the argument by const reference (the old
// by-value parameter copied the string on every call) and casts to
// unsigned char before isalpha — passing a negative plain-char value
// to the <cctype> classifiers is undefined behavior.
bool checkIfaStingContainsLetter(const std::string& str) {
    for (const char ch : str) {
        if (isalpha((unsigned char) ch)) {
            return true;
        }
    }
    return false;
}

// TODO: Add a function header comment here to explain the
// behavior of the function and how you implemented this behavior
string cleanToken(string s)
{
    int index_of_initial_letter = 0;
    int index_of_end_letter = 0;
    for (int i = 0; i < s.length(); i++) {
        if (!ispunct(s[i])) {
            index_of_initial_letter = i;
            break;
        }
    }
    for (int i = s.length() - 1; i > 0; i--) {
        if (!ispunct(s[i])) {
            index_of_end_letter = i;
            break;
        }
    }
    string result = s.substr(index_of_initial_letter, index_of_end_letter - index_of_initial_letter + 1);
    if (!checkIfaStingContainsLetter(result)) {
        return "";
    }
    return toLowerCase(result);
}

// gatherTokens extracts the set of unique, normalized tokens in a body
// of text. The text is split on single spaces, each piece is cleaned
// with cleanToken, and non-empty results are collected into a Set
// (which de-duplicates automatically).
Set<string> gatherTokens(string text)
{
    Set<string> uniqueTokens;
    for (const string& piece : stringSplit(text, " ")) {
        string cleaned = cleanToken(piece);
        if (cleaned != "") {
            uniqueTokens.add(cleaned);
        }
    }
    return uniqueTokens;
}

// buildIndex reads a database file laid out as alternating lines —
// a URL followed by that page's body text — and populates the inverted
// index: each cleaned token maps to the set of URLs whose body
// contains it. Returns the number of pages processed. Calls error()
// if the file cannot be opened.
int buildIndex(string dbfile, Map<string, Set<string>>& index)
{
    ifstream in;
    if (!openFile(in, dbfile)) {
        error("Cannot open file named " + dbfile);
    }

    Vector<string> lines;
    readEntireFile(in, lines);

    int pageCount = 0;
    // walk the (URL, body) pairs; the i + 1 bound skips a trailing
    // unpaired line, matching the original pairing behavior
    for (int i = 0; i + 1 < lines.size(); i += 2) {
        const string& url = lines[i];
        for (const string& token : gatherTokens(lines[i + 1])) {
            index[token].add(url);
        }
        pageCount++;
    }
    return pageCount;
}

// findQueryMatches evaluates a search query against the index. The
// query is one or more space-separated terms; a '+' prefix intersects
// that term's matches into the result, a '-' prefix subtracts them,
// and a bare term unions them in. Every term is normalized with
// cleanToken before lookup, so "Red!" matches the same entry as "red".
// Fixes vs. original: (1) the single-word path looked up the
// lowercased but NOT cleanToken'd query, so punctuation broke
// matching; the per-term loop now handles one-word queries uniformly.
// (2) every lookup used index[token], and Stanford Map::operator[]
// silently INSERTS a default entry for a missing key — queries for
// unknown words were growing the index; lookups now use
// containsKey/get. (3) empty terms from runs of spaces previously
// reached token[0] (undefined behavior); they are now skipped.
Set<string> findQueryMatches(Map<string, Set<string>>& index, string query)
{
    Set<string> result;
    for (const string& term : stringSplit(query, " ")) {
        if (term.empty()) {
            continue;   // guard against consecutive spaces
        }
        string token = cleanToken(term);
        // read-only lookup; operator[] would mutate the index
        Set<string> matches;
        if (index.containsKey(token)) {
            matches = index.get(token);
        }
        if (term[0] == '+') {
            result.intersect(matches);
        } else if (term[0] == '-') {
            result.difference(matches);
        } else {
            result.unionWith(matches);
        }
    }
    return result;
}

// searchEngine drives the interactive console loop: it builds the
// inverted index from dbfile, reports how many pages and unique terms
// were indexed, then repeatedly prompts for a query and prints the
// matching pages until the user enters an empty line.
// Fix vs. original: the "Stand by" message was printed AFTER
// buildIndex returned — too late to reassure the user during the
// (potentially slow) build. It is now printed first.
void searchEngine(string dbfile)
{
    cout << "Stand by while building index..." << endl;
    Map<string, Set<string>> index;
    int numPages = buildIndex(dbfile, index);
    cout << "Indexed " << numPages << " pages containing "
         << index.size() << " unique terms" << endl;
    cout << endl;

    while (true) {
        string query = getLine("Enter query sentence(RETURN/ENTER to quit): ");
        if (query.empty()) {
            break;   // empty line ends the session
        }
        Set<string> matches = findQueryMatches(index, query);
        cout << "Found " << matches.size() << " matching pages " << endl;
        cout << matches.toString() << endl;
        cout << endl;
    }
}

/* * * * * * Test Cases * * * * * */

// Provided: tokens without surrounding punctuation are only lowercased;
// interior punctuation (the '*' in "CS*106B") is preserved.
PROVIDED_TEST("cleanToken on strings with no punctuation at beginning or end") {
    EXPECT_EQUAL(cleanToken("hello"), "hello");
    EXPECT_EQUAL(cleanToken("WORLD"), "world");
    EXPECT_EQUAL(cleanToken("CS*106B"), "cs*106b");
}

// Provided: leading and trailing punctuation is stripped before lowercasing.
PROVIDED_TEST("cleanToken on strings with some punctuation at beginning and end") {
    EXPECT_EQUAL(cleanToken("/hello/"), "hello");
    EXPECT_EQUAL(cleanToken("~woRLD!"), "world");
}

// Provided: tokens containing no letters are rejected (empty string returned).
PROVIDED_TEST("cleanToken on non-word strings (no letters)"){
    EXPECT_EQUAL(cleanToken("106"), "");
    EXPECT_EQUAL(cleanToken("~!106!!!"), "");
}

// Provided: repeated words collapse to one entry in the returned Set.
PROVIDED_TEST("gatherTokens from simple string") {
    Set<string> expected = {"go", "gophers"};
    EXPECT_EQUAL(gatherTokens("go go go gophers"), expected);
}

// Provided: each token is run through cleanToken (trimmed + lowercased).
PROVIDED_TEST("gatherTokens correctly cleans tokens") {
    Set<string> expected = {"i", "love", "cs*106b"};
    EXPECT_EQUAL(gatherTokens("I _love_ CS*106B!"), expected);
}

// Provided: mixed case merges ("Fish"/"fish"), letterless tokens
// ("**", "10") are dropped, so only 5 unique words remain.
PROVIDED_TEST("gatherTokens from seuss, 5 unique words, mixed case, punctuation") {
    Set<string> tokens = gatherTokens("One Fish Two Fish *Red* fish Blue fish ** 10 RED Fish?");
    EXPECT_EQUAL(tokens.size(), 5);
    EXPECT(tokens.contains("fish"));
    EXPECT(!tokens.contains("Fish"));
}

// Provided: tiny.txt ships with the starter project (res/ directory)
// and contains 4 URL/body pairs with 11 unique tokens overall.
PROVIDED_TEST("buildIndex from tiny.txt, 4 pages, 11 unique tokens") {
    Map<string, Set<string>> index;
    int nPages = buildIndex("res/tiny.txt", index);
    EXPECT_EQUAL(nPages, 4);
    EXPECT_EQUAL(index.size(), 11);
    EXPECT(index.containsKey("fish"));
}

// Provided: a single-word query returns that word's URL set; a word
// absent from the index returns the empty set.
PROVIDED_TEST("findQueryMatches from tiny.txt, single word query") {
    Map<string, Set<string>> index;
    buildIndex("res/tiny.txt", index);
    Set<string> matchesRed = findQueryMatches(index, "red");
    EXPECT_EQUAL(matchesRed.size(), 2);
    EXPECT(matchesRed.contains("www.dr.seuss.net"));
    Set<string> matchesHippo = findQueryMatches(index, "hippo");
    EXPECT(matchesHippo.isEmpty());
}

// Provided: bare terms union, '+' intersects, '-' subtracts.
PROVIDED_TEST("findQueryMatches from tiny.txt, compound queries") {
    Map<string, Set<string>> index;
    buildIndex("res/tiny.txt", index);
    Set<string> matchesRedOrFish = findQueryMatches(index, "red fish");
    EXPECT_EQUAL(matchesRedOrFish.size(), 4);
    Set<string> matchesRedAndFish = findQueryMatches(index, "red +fish");
    EXPECT_EQUAL(matchesRedAndFish.size(), 1);
    Set<string> matchesRedWithoutFish = findQueryMatches(index, "red -fish");
    EXPECT_EQUAL(matchesRedWithoutFish.size(), 1);
}


// Student test cases.
// Fix vs. original: three tests below all carried the identical
// description "gatherTokens from simple string", which made failure
// reports ambiguous; each now has a distinct, descriptive name.

// Trimming the ends must not disturb interior punctuation.
STUDENT_TEST("cleanToken on strings with punctuation at beginning or end") {
    EXPECT_EQUAL(cleanToken("<<section>>"), "section");
    EXPECT_EQUAL(cleanToken("section!"), "section");
    EXPECT_EQUAL(cleanToken("section's"), "section's");
    EXPECT_EQUAL(cleanToken("section-10"), "section-10");
}

// Heavily decorated tokens: only the outermost punctuation is trimmed,
// and tokens with no letters at all are rejected.
STUDENT_TEST("cleanToken on strings mixing letters, digits and symbols") {
    EXPECT_EQUAL(cleanToken("#@C@S@#"), "c@s");
    EXPECT_EQUAL(cleanToken("#@##-106@#"), "");
    EXPECT_EQUAL(cleanToken("#@CS-106@#"), "cs-106");
    EXPECT_EQUAL(cleanToken("#@C@S@1@0@6@#"), "c@s@1@0@6");
}

STUDENT_TEST("gatherTokens keeps a URL-like token intact") {
    Set<string> expected = {"www.shoppinglist.com"};
    EXPECT_EQUAL(gatherTokens("www.shoppinglist.com"), expected);
}

STUDENT_TEST("gatherTokens strips trailing commas from list items") {
    Set<string> expected = {"5lb_m&ms", "bread", "milk", "fish"};
    EXPECT_EQUAL(gatherTokens("milk, fish, bread, 5lb_m&ms"), expected);
}

STUDENT_TEST("gatherTokens strips leading query-style +/- prefixes") {
    Set<string> expected = {"tasty", "mushrooms", "simple", "cheap"};
    EXPECT_EQUAL(gatherTokens("tasty -mushrooms simple +cheap"), expected);
}

// buildIndex over the full course-website database: 32 URL/body pairs
// (requires res/website.txt from the starter project).
STUDENT_TEST("buildIndex from website.txt, 32 pages") {
    Map<string, Set<string>> index;
    int nPages = buildIndex("res/website.txt", index);
    EXPECT_EQUAL(nPages, 32);
    EXPECT(index.containsKey("know"));
}

// Compound query mixing two unioned terms with a '-' exclusion.
STUDENT_TEST("findQueryMatches from tiny.txt, compound queries") {
    Map<string, Set<string>> index;
    buildIndex("res/tiny.txt", index);
    Set<string> matchesBreadOrFish = findQueryMatches(index, "bread fish -red");
    EXPECT_EQUAL(matchesBreadOrFish.size(), 2);
}
