{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "ec41febb",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "\n",
    "class GetStopWrods():\n",
    "    \"\"\"Loads a list of stop words from a newline-delimited text file.\n",
    "\n",
    "    NOTE(review): the class name keeps the original 'GetStopWrods' typo\n",
    "    because later cells instantiate it by this exact name.\n",
    "    \"\"\"\n",
    "\n",
    "    def GetListOfStopWords(self, filepath):\n",
    "        \"\"\"Read `filepath` and return its lines as a list of stop words.\"\"\"\n",
    "        # Context manager guarantees the file is closed even if read() raises\n",
    "        # (the original leaked the handle on a read error).\n",
    "        with open(filepath) as stopWordsFile:\n",
    "            stopWordsContext = stopWordsFile.read()\n",
    "        return stopWordsContext.split('\\n')\n",
    "\n",
    "    def Call(self, filepath):\n",
    "        \"\"\"Convenience wrapper around GetListOfStopWords.\"\"\"\n",
    "        return self.GetListOfStopWords(filepath)\n",
    "\n",
    "# stopWordsPath = \"stopwords.txt\"\n",
    "# g = GetStopWrods()\n",
    "# stopWords = g.Call(stopWordsPath)\n",
    "# print(stopWords)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "b3ec352e",
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "import sys\n",
    "from nltk.stem import *\n",
    "\n",
    "class Cut():\n",
    "    \"\"\"Tokenizes a document file into a de-duplicated list of words.\n",
    "\n",
    "    Pipeline: strip 'Header: value' lines, e-mail addresses and dollar\n",
    "    amounts via regex, replace punctuation with spaces, lower-case, split,\n",
    "    de-duplicate, then drop stop words.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, listOfStopWords = None):\n",
    "        # Regexes that remove header lines, '>' quote markers, e-mail\n",
    "        # addresses and dollar amounts.\n",
    "        self.regList = [r\".*: .*\\n\", r\">\", r\"[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+\", r\"\\$\\d\"]\n",
    "        # Literal characters replaced by spaces (str.replace, not regex).\n",
    "        # Bug fix: the quote entry used to be a raw two-character\n",
    "        # backslash+quote sequence, so a plain quote was never removed.\n",
    "        self.replaceItems = [\"|\", \"<\", \">\", \"(\", \")\", \"[\", \"]\", \",\", \".\", \"?\", \"!\", \"\\\"\", \"-\", \"_\", \":\", \"~\", \"*\", \"&\"]\n",
    "        # Avoid the mutable-default-argument pitfall; None means no stop words.\n",
    "        self.listOfStopWords = listOfStopWords if listOfStopWords is not None else []\n",
    "\n",
    "\n",
    "    def replaceWithStopWordsAndRegex(self, filename):\n",
    "        \"\"\"Return the file's text with regex matches and punctuation removed.\n",
    "\n",
    "        Returns \"\" when the file cannot be decoded as text.\n",
    "        \"\"\"\n",
    "        # Context manager closes the handle on every path (the original\n",
    "        # closed it manually on both branches).\n",
    "        try:\n",
    "            with open(filename) as f:\n",
    "                fContext = f.read()\n",
    "        except UnicodeDecodeError:\n",
    "            return \"\"\n",
    "        for item in self.regList:\n",
    "            fContext = re.sub(item, \"\", fContext)\n",
    "        for item in self.replaceItems:\n",
    "            fContext = fContext.replace(item, \" \")\n",
    "        return fContext\n",
    "\n",
    "    def cutWords(self, filename):\n",
    "        \"\"\"Return the unique, lower-cased, stop-word-filtered tokens of a file.\"\"\"\n",
    "        fContext = self.replaceWithStopWordsAndRegex(filename)\n",
    "        if fContext == \"\":\n",
    "            return []\n",
    "        listOfF = list(set(fContext.lower().split()))\n",
    "        # Set lookup makes stop-word filtering O(n) instead of O(n*m).\n",
    "        stopSet = set(self.listOfStopWords)\n",
    "        listOfF = [word for word in listOfF if word not in stopSet]\n",
    "        #self.wordStemming(listOfF)\n",
    "        return listOfF\n",
    "    \n",
    "    def wordStemming(self, listOfF):\n",
    "        \"\"\"Stem every token in-place with the Lancaster stemmer.\n",
    "\n",
    "        Bug fix: `lancaster.LancasterStemmer` raised NameError because the\n",
    "        wildcard import above provides `LancasterStemmer` directly.\n",
    "        \"\"\"\n",
    "        stemmer = LancasterStemmer()\n",
    "        for i in range(len(listOfF)):\n",
    "            listOfF[i] = stemmer.stem(listOfF[i])\n",
    "\n",
    "    def Call(self, filename):\n",
    "        \"\"\"Convenience wrapper around cutWords.\"\"\"\n",
    "        return self.cutWords(filename)\n",
    "\n",
    "\n",
    "# stopWordsPath = \"stopwords.txt\"\n",
    "# g = GetStopWrods()\n",
    "# stopWords = g.Call(stopWordsPath)\n",
    "# c = Cut(stopWords)\n",
    "# print(c.Call(\"52558\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "982aadf1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "39544\n",
      "\n",
      "WordsDictionary Set begin.\n",
      "\n",
      "WordsDictionary Set end.\n",
      "\n",
      "BoolMatrix Set begin.\n",
      "\n",
      "BoolMatrix Set end.\n",
      "\n",
      "Verify begin.\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<ipython-input-7-03af95dbabd5>:10: RuntimeWarning: divide by zero encountered in true_divide\n",
      "  return 1.0 / item[0]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "F1 Score: 0.7707082001520406.\n",
      "Cross Verify 0: Precisely predicted: 268, Mistakely predicted: 92\n",
      "Verify end.\n",
      "\n",
      "rec.sport.hockey 0.9\n",
      "39591\n",
      "\n",
      "WordsDictionary Set begin.\n",
      "\n",
      "WordsDictionary Set end.\n",
      "\n",
      "BoolMatrix Set begin.\n",
      "\n",
      "BoolMatrix Set end.\n",
      "\n",
      "Verify begin.\n",
      "\n",
      "F1 Score: 0.7762177614286544.\n",
      "Cross Verify 1: Precisely predicted: 269, Mistakely predicted: 91\n",
      "Verify end.\n",
      "\n",
      "rec.sport.hockey 0.9\n",
      "39338\n",
      "\n",
      "WordsDictionary Set begin.\n",
      "\n",
      "WordsDictionary Set end.\n",
      "\n",
      "BoolMatrix Set begin.\n",
      "\n",
      "BoolMatrix Set end.\n",
      "\n",
      "Verify begin.\n",
      "\n",
      "F1 Score: 0.819971410217393.\n",
      "Cross Verify 2: Precisely predicted: 291, Mistakely predicted: 69\n",
      "Verify end.\n",
      "\n",
      "rec.sport.hockey 0.9\n",
      "22011\n",
      "\n",
      "WordsDictionary Set begin.\n",
      "\n",
      "WordsDictionary Set end.\n",
      "\n",
      "BoolMatrix Set begin.\n",
      "\n",
      "BoolMatrix Set end.\n",
      "\n",
      "Verify begin.\n",
      "\n",
      "F1 Score: 0.7617115486961781.\n",
      "Cross Verify 3: Precisely predicted: 265, Mistakely predicted: 95\n",
      "Verify end.\n",
      "\n",
      "rec.sport.hockey 0.9\n",
      "39901\n",
      "\n",
      "WordsDictionary Set begin.\n",
      "\n",
      "WordsDictionary Set end.\n",
      "\n",
      "BoolMatrix Set begin.\n",
      "\n",
      "BoolMatrix Set end.\n",
      "\n",
      "Verify begin.\n",
      "\n",
      "F1 Score: 0.7615476640642354.\n",
      "Cross Verify 4: Precisely predicted: 263, Mistakely predicted: 97\n",
      "Verify end.\n",
      "\n",
      "rec.sport.hockey 0.85\n",
      "rec.sport.hockey 0.56\n"
     ]
    }
   ],
   "source": [
    "from typing import Set\n",
    "import os\n",
    "import random\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "def takeFirst(item):\n",
    "    \"\"\"Sort key for [similarity, className] pairs: larger similarity first.\n",
    "\n",
    "    Bug fix: the original guard compared the whole pair (`item == 0`,\n",
    "    always False for a list) instead of the similarity value, so\n",
    "    zero-similarity entries hit 1.0 / 0 and produced the numpy\n",
    "    divide-by-zero RuntimeWarning seen in this cell's output.\n",
    "    \"\"\"\n",
    "    if item[0] == 0:\n",
    "        # Huge key pushes zero-similarity documents to the end of the sort.\n",
    "        return 1000000000000000.0\n",
    "    return 1.0 / item[0]\n",
    "\n",
    "def sampleNFold(totalNumber, N = 5):\n",
    "    \"\"\"Randomly partition indices 0..totalNumber-1 into N disjoint folds.\n",
    "\n",
    "    Each fold holds totalNumber // N indices; returns a list of N folds.\n",
    "    \"\"\"\n",
    "    remaining = list(range(totalNumber))\n",
    "    numberPerFold = int(totalNumber / N)\n",
    "    folds = []\n",
    "    for _ in range(N):\n",
    "        fold = random.sample(remaining, numberPerFold)\n",
    "        folds.append(fold)\n",
    "        # Drop the sampled indices while keeping the original order.\n",
    "        remaining = [index for index in remaining if index not in fold]\n",
    "    return folds\n",
    "\n",
    "class SetProbability():\n",
    "    \"\"\"k-NN text classifier over the 20_newsgroups_subset directory,\n",
    "    evaluated with N-fold cross-validation.\n",
    "\n",
    "    Per-class state (word matrices, dictionaries, file lists) is stored in\n",
    "    self.__dict__ under keys derived from each class directory's name.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, N = 5):\n",
    "        \"\"\"Build the fold split and per-class containers.\n",
    "\n",
    "        N: number of cross-validation folds.\n",
    "        NOTE(review): sampleNFold(300, ...) assumes exactly 300 documents\n",
    "        per class directory — confirm against the data set.\n",
    "        \"\"\"\n",
    "        self.N = N\n",
    "        self.sampledContainer = sampleNFold(300, self.N)\n",
    "        self.defaults = {}\n",
    "        self.classPaths = []\n",
    "        self.wordsContainer = []\n",
    "        self.wordsDictionary = {}\n",
    "        CLASSES_PATH = os.path.abspath('.') + \"/\" + \"20_newsgroups_subset\" + \"/\"\n",
    "        CLASSES_LIST = os.listdir(CLASSES_PATH)\n",
    "        data = [[0 for i in range(len(CLASSES_LIST))] for j in range(len(CLASSES_LIST))]\n",
    "        self.confusionMatrix = pd.DataFrame(data, index = CLASSES_LIST, columns = CLASSES_LIST) \n",
    "        for dir in CLASSES_LIST:\n",
    "            classPath = CLASSES_PATH + dir + \"/\"\n",
    "            self.classPaths.append(classPath)\n",
    "            files = os.listdir(classPath)\n",
    "            # File lists survive clearLastFold(); the fold split indexes them.\n",
    "            self.__dict__[dir + \"files\"] = files\n",
    "            # print(len(files))\n",
    "            self.defaults[dir + \"WordMatrix\"] = []\n",
    "            self.defaults[dir + \"BoolMatrix\"] = []\n",
    "            self.defaults[dir + \"WordContainer\"] = []\n",
    "            self.defaults[dir + \"WordDictionary\"] = {}\n",
    "        self.__dict__.update(self.defaults)\n",
    "        \n",
    "    def computeF1Score(self):\n",
    "        \"\"\"Macro-averaged F1 over the current confusion matrix.\n",
    "\n",
    "        Rows are true classes, columns are predicted classes.\n",
    "        NOTE(review): a row/column summing to zero divides by zero here\n",
    "        (warning/NaN with numpy values) — confirm this cannot happen.\n",
    "        \"\"\"\n",
    "        P = 0.0\n",
    "        R = 0.0\n",
    "        for i in range(len(self.confusionMatrix.values)):\n",
    "            tmpR = 0.0\n",
    "            for j in range(len(self.confusionMatrix.values)):\n",
    "                tmpR = tmpR + self.confusionMatrix.values[i][j]\n",
    "            tmpR = self.confusionMatrix.values[i][i] / tmpR\n",
    "            R += tmpR\n",
    "        for i in range(len(self.confusionMatrix.values)):\n",
    "            tmpP = 0.0\n",
    "            for j in range(len(self.confusionMatrix.values)):\n",
    "                tmpP = tmpP + self.confusionMatrix.values[j][i]\n",
    "            tmpP = self.confusionMatrix.values[i][i] / tmpP\n",
    "            P += tmpP\n",
    "        P = P / len(self.confusionMatrix.values)\n",
    "        R = R / len(self.confusionMatrix.values)\n",
    "        return 2 * P * R / (P + R)\n",
    "    \n",
    "    def clearLastFold(self):\n",
    "        \"\"\"Reset per-fold state (word containers, matrices, confusion matrix)\n",
    "        while keeping the file lists and the fold split intact.\"\"\"\n",
    "        self.defaults = {}\n",
    "        # self.classPaths = []\n",
    "        self.wordsContainer = []\n",
    "        self.wordsDictionary = {}\n",
    "        CLASSES_PATH = os.path.abspath('.') + \"/\" + \"20_newsgroups_subset\" + \"/\"\n",
    "        CLASSES_LIST = os.listdir(CLASSES_PATH)\n",
    "        data = [[0 for i in range(len(CLASSES_LIST))] for j in range(len(CLASSES_LIST))]\n",
    "        self.confusionMatrix = pd.DataFrame(data, index = CLASSES_LIST, columns = CLASSES_LIST) \n",
    "        for dir in CLASSES_LIST:\n",
    "            self.defaults[dir + \"WordMatrix\"] = []\n",
    "            self.defaults[dir + \"BoolMatrix\"] = []\n",
    "            self.defaults[dir + \"WordContainer\"] = []\n",
    "            self.defaults[dir + \"WordDictionary\"] = {}\n",
    "        self.__dict__.update(self.defaults)\n",
    "\n",
    "\n",
    "    def setWordsContainer(self, kth):\n",
    "        \"\"\"Tokenize every training document (all folds except `kth`) and fill\n",
    "        the global and per-class word containers.\"\"\"\n",
    "\n",
    "        CLASSES_PATH = os.path.abspath('.') + \"/\" + \"20_newsgroups_subset\" + \"/\"\n",
    "        CLASSES_LIST = os.listdir(CLASSES_PATH)\n",
    "        \n",
    "        self.clearLastFold()\n",
    "\n",
    "        for dir in CLASSES_LIST:\n",
    "            # print(CLASSES_PATH + dir + \"/\")\n",
    "            classPath = CLASSES_PATH + dir + \"/\"\n",
    "            # self.classPaths.append(classPath)\n",
    "            files = self.__dict__[dir + \"files\"]\n",
    "            flagOfNumber = 0\n",
    "            for file in files:\n",
    "                # Skip documents belonging to the held-out fold `kth`.\n",
    "                if flagOfNumber in self.sampledContainer[kth]:\n",
    "                    flagOfNumber += 1\n",
    "                    continue\n",
    "                flagOfNumber += 1\n",
    "                filepath = classPath + file\n",
    "                # NOTE(review): the stop-word file is re-read from disk for\n",
    "                # every document; hoisting this out of the loop would be\n",
    "                # much cheaper.\n",
    "                stopWordsPath = \"stopwords.txt\"\n",
    "                g = GetStopWrods()\n",
    "                stopWords = g.Call(stopWordsPath)\n",
    "                c = Cut(stopWords)\n",
    "                wordsList = c.Call(filepath)\n",
    "                self.__dict__[dir + \"WordMatrix\"].append(wordsList)\n",
    "                self.__dict__[dir + \"WordContainer\"].extend(wordsList)\n",
    "                self.__dict__[dir + \"WordContainer\"] = list(set(self.__dict__[dir + \"WordContainer\"]))\n",
    "                self.wordsContainer.extend(wordsList)\n",
    "                self.wordsContainer = list(set(self.wordsContainer))\n",
    "        print(str(len(self.wordsContainer)) + \"\\n\")\n",
    "\n",
    "    def setWordsDictionary(self):\n",
    "        \"\"\"Map each known word to an integer index, both globally\n",
    "        (self.wordsDictionary) and per class directory.\"\"\"\n",
    "        print(\"WordsDictionary Set begin.\" + \"\\n\")\n",
    "        for i in range(len(self.wordsContainer)):\n",
    "            self.wordsDictionary[self.wordsContainer[i]] = i\n",
    "        CLASSES_PATH = os.path.abspath('.') + \"/\" + \"20_newsgroups_subset\" + \"/\"\n",
    "        CLASSES_LIST = os.listdir(CLASSES_PATH)\n",
    "        for dir in CLASSES_LIST:\n",
    "            for i in range(len(self.__dict__[dir + \"WordContainer\"])):\n",
    "                self.__dict__[dir + \"WordDictionary\"][self.__dict__[dir + \"WordContainer\"][i]] = i\n",
    "        print(\"WordsDictionary Set end.\" + \"\\n\")\n",
    "    \n",
    "    def setBoolMatrix(self):\n",
    "        \"\"\"Build one term-count vector (length = vocabulary size) per\n",
    "        training document, grouped by class in <dir>BoolMatrix.\"\"\"\n",
    "        print(\"BoolMatrix Set begin.\" + \"\\n\")\n",
    "        CLASSES_PATH = os.path.abspath('.') + \"/\" + \"20_newsgroups_subset\" + \"/\"\n",
    "        CLASSES_LIST = os.listdir(CLASSES_PATH)\n",
    "        for dir in CLASSES_LIST:\n",
    "            # print(\"dir \\n\")\n",
    "            # NOTE(review): `list` shadows the builtin inside this loop.\n",
    "            for list in self.__dict__[dir + \"WordMatrix\"]:\n",
    "                tmpList = [0 for i in range(len(self.wordsDictionary))]\n",
    "                for i in range(len(list)):\n",
    "                    #if tmpList[self.wordsDictionary[list[i]]] == 0:\n",
    "                    tmpList[self.wordsDictionary[list[i]]] += 1\n",
    "                self.__dict__[dir + \"BoolMatrix\"].append(tmpList)\n",
    "        print(\"BoolMatrix Set end.\" + \"\\n\")\n",
    "\n",
    "    # def setConditionalProbability(self):\n",
    "    #     print(\"ConditionalProbability Set begin.\" + \"\\n\")\n",
    "    #     CLASSES_PATH = os.path.dirname(os.path.abspath(__file__)) + \"/\" + \"20_newsgroups_subset\" + \"/\"\n",
    "    #     CLASSES_LIST = os.listdir(CLASSES_PATH)\n",
    "    #     for dir in CLASSES_LIST:\n",
    "    #         # print(\"dir \\n\")\n",
    "    #         SUM = 1 * len(self.wordsDictionary)\n",
    "    #         self.__dict__[dir + \"CP\"] = [1.0 for i in range(len(self.wordsDictionary))]\n",
    "    #         for list in range(len(self.__dict__[dir + \"BoolMatrix\"])):\n",
    "    #             for i in range(len(self.wordsDictionary)):\n",
    "    #                 self.__dict__[dir + \"CP\"][i] = self.__dict__[dir + \"CP\"][i] + self.__dict__[dir + \"BoolMatrix\"][list][i]\n",
    "    #                 SUM = SUM + self.__dict__[dir + \"BoolMatrix\"][list][i]\n",
    "    #         self.__dict__[dir + \"SUM\"] = SUM\n",
    "    #         for i in range(len(self.wordsDictionary)):\n",
    "    #             self.__dict__[dir + \"CP\"][i] = self.__dict__[dir + \"CP\"][i] / SUM\n",
    "    #         if 0.0 in self.__dict__[dir + \"CP\"]:\n",
    "    #             print(\"there is 0 in {}.\".format(dir))\n",
    "    #     print(\"ConditionalProbability Set end.\" + \"\\n\")\n",
    "    \n",
    "    def train(self):\n",
    "        \"\"\"Run all N folds: build the fold's structures, verify it with\n",
    "        k = 5, then demo-predict document \"52558\" with k = 20.\"\"\"\n",
    "        for i in range(self.N):\n",
    "            self.setWordsContainer(i)\n",
    "            self.setWordsDictionary()\n",
    "            self.setBoolMatrix()\n",
    "            # self.setConditionalProbability()\n",
    "            self.verify(i, 5)\n",
    "            predClass, predProbability = self.predict(\"52558\", 20)\n",
    "            print(predClass, predProbability)\n",
    "\n",
    "    def predict2(self, filepath):\n",
    "        \"\"\"Naive-Bayes style prediction (legacy).\n",
    "\n",
    "        NOTE(review): reads <dir>CP and <dir>SUM, which are only set by the\n",
    "        commented-out setConditionalProbability above — this method fails\n",
    "        with the current pipeline.\n",
    "        \"\"\"\n",
    "        print(\"Predict begin.\" + \"\\n\")\n",
    "        stopWordsPath = \"stopwords.txt\"\n",
    "        g = GetStopWrods()\n",
    "        stopWords = g.Call(stopWordsPath)\n",
    "        c = Cut(stopWords)\n",
    "        wordsList = c.Call(filepath)\n",
    "        CLASSES_PATH = os.path.abspath('.') + \"/\" + \"20_newsgroups_subset\" + \"/\"\n",
    "        CLASSES_LIST = os.listdir(CLASSES_PATH)\n",
    "        maxProbability = 0.0\n",
    "        maxClass = None\n",
    "        minUnseen = 10000\n",
    "        for dir in CLASSES_LIST:\n",
    "            tmpProbability = 1.0\n",
    "            unseen = 0\n",
    "            for i in range(len(wordsList)):\n",
    "                if wordsList[i] in self.__dict__[dir + \"WordDictionary\"]:\n",
    "                    tmpProbability = tmpProbability * self.__dict__[dir + \"CP\"][self.__dict__[dir + \"WordDictionary\"][wordsList[i]]]\n",
    "                else:\n",
    "                    # Unseen word: smooth with 1 / (SUM + 1).\n",
    "                    tmpProbability = tmpProbability / (self.__dict__[dir + \"SUM\"] + 1)\n",
    "                    unseen += 1\n",
    "                    # print(\"{} Unseen words {}\".format(dir, unseen))\n",
    "                    # print(tmpProbability)\n",
    "            if tmpProbability > maxProbability:\n",
    "                maxProbability = tmpProbability\n",
    "                maxClass = dir\n",
    "            # On float underflow to 0, fall back to fewest unseen words.\n",
    "            if maxProbability == 0:\n",
    "                if unseen < minUnseen:\n",
    "                    maxClass = dir\n",
    "                    minUnseen = unseen\n",
    "        print(\"Predict end.\" + \"\\n\")\n",
    "        return maxClass, maxProbability\n",
    "\n",
    "    def verify(self, kth, kNumber):\n",
    "        \"\"\"Classify every held-out document of fold `kth` with k = kNumber,\n",
    "        update the confusion matrix, print the macro F1 score and dump the\n",
    "        matrix to confusionMatrixOfKNN<kth>.csv.\"\"\"\n",
    "        print(\"Verify begin.\" + \"\\n\")\n",
    "        T = 0\n",
    "        F = 0\n",
    "        CLASSES_PATH = os.path.abspath('.') + \"/\" + \"20_newsgroups_subset\" + \"/\"\n",
    "        CLASSES_LIST = os.listdir(CLASSES_PATH)\n",
    "        for dir in CLASSES_LIST:\n",
    "            classPath = CLASSES_PATH + dir + \"/\"\n",
    "            for item in self.sampledContainer[kth]:\n",
    "                filepath = classPath + self.__dict__[dir + \"files\"][item]\n",
    "                predClass, predProbability = self.predict(filepath, kNumber)\n",
    "                if predClass == dir:\n",
    "                    T += 1\n",
    "                else:\n",
    "                    F += 1\n",
    "                self.confusionMatrix.at[dir, predClass] += 1\n",
    "        F1 = self.computeF1Score()\n",
    "        print(\"F1 Score: {}.\".format(F1))\n",
    "        csvName = \"confusionMatrixOfKNN\" + str(kth) + \".csv\"\n",
    "        self.confusionMatrix.to_csv(csvName)\n",
    "        print(\"Cross Verify {}: Precisely predicted: {}, Mistakely predicted: {}\".format(kth, T, F))\n",
    "        print(\"Verify end.\" + \"\\n\")\n",
    "\n",
    "    def predict(self, filepath, kNumber):\n",
    "        \"\"\"k-NN prediction for one file.\n",
    "\n",
    "        Similarity is the dot product between the document's term-count\n",
    "        vector and each training vector; takeFirst (1/x) sorts candidates\n",
    "        by descending similarity. Returns (class, vote fraction).\n",
    "        \"\"\"\n",
    "        # print(\"Predict begin.\" + \"\\n\")\n",
    "        stopWordsPath = \"stopwords.txt\"\n",
    "        g = GetStopWrods()\n",
    "        stopWords = g.Call(stopWordsPath)\n",
    "        c = Cut(stopWords)\n",
    "        wordsList = c.Call(filepath)\n",
    "        CLASSES_PATH = os.path.abspath('.') + \"/\" + \"20_newsgroups_subset\" + \"/\"\n",
    "        CLASSES_LIST = os.listdir(CLASSES_PATH)\n",
    "        distanceContainer = []\n",
    "        # Term-count vector of the query document over the training vocabulary;\n",
    "        # words outside the vocabulary are ignored.\n",
    "        indexList = [0 for i in range(len(self.wordsDictionary))]\n",
    "        # tmpProbability = 1.0\n",
    "        for i in range(len(wordsList)):\n",
    "            if wordsList[i] in self.wordsDictionary:\n",
    "                indexList[self.wordsDictionary[wordsList[i]]] += 1\n",
    "        npList = np.array(indexList)\n",
    "        # tmpIndexList = [0 for i in range(len(self.wordsDictionary))]\n",
    "        for dir in CLASSES_LIST:\n",
    "            dirMatrix = np.array(self.__dict__[dir + \"BoolMatrix\"])\n",
    "            # One dot product per training document of this class.\n",
    "            result = list(np.matmul(dirMatrix, npList.T))\n",
    "            for item in result:\n",
    "                tmp = [item, dir]\n",
    "                distanceContainer.append(tmp)\n",
    "        distanceContainer.sort(key = takeFirst)\n",
    "        # print(\"Predict end.\" + \"\\n\")\n",
    "        # Majority vote among the kNumber nearest neighbours.\n",
    "        votingDictionary = {}\n",
    "        for dir in CLASSES_LIST:\n",
    "            votingDictionary[dir] = 0\n",
    "        for i in range(kNumber):\n",
    "            votingDictionary[distanceContainer[i][1]] += 1\n",
    "        maxClass = \"\"\n",
    "        maxNumber = 0.0\n",
    "        for dir, votingNumber in votingDictionary.items():\n",
    "            if votingNumber > maxNumber:\n",
    "                maxClass = dir\n",
    "                maxNumber = votingNumber\n",
    "        return maxClass, maxNumber / kNumber\n",
    "\n",
    "# Run 5-fold cross-validated training/evaluation, then classify the\n",
    "# document named \"52558\" with k = 50 nearest neighbours.\n",
    "s = SetProbability()\n",
    "s.train()\n",
    "predClass, predProbability = s.predict(\"52558\", 50)\n",
    "print(predClass, predProbability)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bd9392fe",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
