{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "a9dde8bb",
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "import sys\n",
    "import os\n",
    "import random\n",
    "from nltk.stem import *\n",
    "from typing import Set\n",
    "\n",
    "class GetStopWrods():\n",
    "    \"\"\"Load stop words from a newline-delimited text file.\n",
    "\n",
    "    NOTE(review): the class name misspells 'Words' but is kept for\n",
    "    backward compatibility with existing callers in this notebook.\n",
    "    \"\"\"\n",
    "\n",
    "    def GetListOfStopWords(self, filepath):\n",
    "        \"\"\"Read `filepath` and return its non-empty lines as a list.\n",
    "\n",
    "        Empty lines (e.g. from a trailing newline) are dropped so the\n",
    "        caller never receives '' as a stop word.\n",
    "        \"\"\"\n",
    "        # `with` closes the handle even if read() raises; the original\n",
    "        # leaked the file object on a read error.\n",
    "        with open(filepath) as stopWordsFile:\n",
    "            stopWordsContext = stopWordsFile.read()\n",
    "        return [word for word in stopWordsContext.split('\\n') if word]\n",
    "\n",
    "    def Call(self, filepath):\n",
    "        \"\"\"Convenience wrapper around GetListOfStopWords.\"\"\"\n",
    "        return self.GetListOfStopWords(filepath)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "5b6f2e39",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "class Cut():\n",
    "    \"\"\"Tokenise a text file: strip mail headers, quote markers, e-mail\n",
    "    addresses and punctuation, lower-case, deduplicate, and drop stop\n",
    "    words.\"\"\"\n",
    "\n",
    "    def __init__(self, listOfStopWords = None):\n",
    "        # Regexes removed outright: 'Header: value' lines, '>' quote\n",
    "        # markers, e-mail addresses, and '$<digit>' price fragments.\n",
    "        self.regList = [r\".*: .*\\n\", r\">\", r\"[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+\", r\"\\$\\d\"]\n",
    "        # Punctuation characters replaced by a space before tokenising.\n",
    "        self.replaceItems = [r\"|\", r\"<\", r\">\", r\"(\", r\")\", r\"[\", r\"]\", r\",\", r\".\", r\"?\", r\"!\", r\"\\\"\", r\"-\", r\"_\", r\":\", r\"~\", r\"*\", r\"&\"]\n",
    "        # `None` default avoids the shared-mutable-default-argument trap\n",
    "        # (the original used `listOfStopWords = []`).\n",
    "        self.listOfStopWords = [] if listOfStopWords is None else listOfStopWords\n",
    "\n",
    "    def replaceWithStopWordsAndRegex(self, filename):\n",
    "        \"\"\"Return the file's text with regex matches deleted and\n",
    "        punctuation replaced by spaces; '' if the file is not decodable\n",
    "        text.\"\"\"\n",
    "        try:\n",
    "            # `with` guarantees the handle is closed on every path.\n",
    "            with open(filename) as f:\n",
    "                fContext = f.read()\n",
    "        except UnicodeDecodeError:\n",
    "            # Binary / non-text file: signal 'no usable text' to cutWords.\n",
    "            return \"\"\n",
    "        for item in self.regList:\n",
    "            fContext = re.sub(item, \"\", fContext)\n",
    "        for item in self.replaceItems:\n",
    "            fContext = fContext.replace(item, \" \")\n",
    "        return fContext\n",
    "\n",
    "    def cutWords(self, filename):\n",
    "        \"\"\"Return the file's unique lower-cased tokens minus stop words.\n",
    "\n",
    "        Order is unspecified (set-based deduplication), matching the\n",
    "        original behaviour.\"\"\"\n",
    "        fContext = self.replaceWithStopWordsAndRegex(filename)\n",
    "        if fContext == \"\":\n",
    "            return []\n",
    "        # A set gives O(1) stop-word membership tests; the original\n",
    "        # scanned a list once per stop word.\n",
    "        stopWords = set(self.listOfStopWords)\n",
    "        uniqueWords = set(fContext.lower().split())\n",
    "        return [word for word in uniqueWords if word not in stopWords]\n",
    "\n",
    "    def wordStemming(self, listOfF):\n",
    "        \"\"\"Stem every word in place (currently unused by cutWords).\"\"\"\n",
    "        # `from nltk.stem import *` exports LancasterStemmer directly;\n",
    "        # the original referenced the undefined name `lancaster`, which\n",
    "        # raised NameError whenever this method was called.\n",
    "        stemmer = LancasterStemmer()\n",
    "        for i in range(len(listOfF)):\n",
    "            listOfF[i] = stemmer.stem(listOfF[i])\n",
    "\n",
    "    def Call(self, filename):\n",
    "        \"\"\"Convenience wrapper around cutWords.\"\"\"\n",
    "        return self.cutWords(filename)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "f71022b1",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def computeDistance(tmpIndexList, indexList):\n",
    "    \"\"\"Similarity-derived distance: 1 / (1 + dot product of the two\n",
    "    0/1 index vectors). More shared words => smaller distance.\"\"\"\n",
    "    dotProduct = 0\n",
    "    for i in range(len(indexList)):\n",
    "        dotProduct += tmpIndexList[i] * indexList[i]\n",
    "    return 1.0 / (1 + dotProduct)\n",
    "\n",
    "def takeFirst(item):\n",
    "    \"\"\"sort() key: order [distance, className] pairs by distance.\"\"\"\n",
    "    first = item[0]\n",
    "    return first\n",
    "\n",
    "class KNN():\n",
    "    \"\"\"k-nearest-neighbour text classifier for the 20_newsgroups corpus.\n",
    "\n",
    "    Every document becomes a 0/1 vector over the training vocabulary;\n",
    "    distance between documents is 1 / (1 + word overlap).  Per-class\n",
    "    data lives in dynamically named attributes: <class>WordMatrix,\n",
    "    <class>IndexMatrix, <class>WordContainer, <class>WordDictionary.\n",
    "    \"\"\"\n",
    "\n",
    "    # Corpus directory and stop-word file, relative to the notebook CWD.\n",
    "    CORPUS_DIR = \"20_newsgroups\"\n",
    "    STOP_WORDS_PATH = \"stopwords.txt\"\n",
    "\n",
    "    def _classesPath(self):\n",
    "        \"\"\"Absolute corpus path with a trailing separator.\"\"\"\n",
    "        return os.path.abspath('.') + \"/\" + self.CORPUS_DIR + \"/\"\n",
    "\n",
    "    def __init__(self):\n",
    "        self.defaults = {}\n",
    "        self.classPaths = []\n",
    "        self.wordsContainer = []   # global vocabulary (list of words)\n",
    "        self.wordsDictionary = {}  # word -> index into wordsContainer\n",
    "        # `className` replaces the original loop variable `dir`, which\n",
    "        # shadowed the built-in.\n",
    "        for className in os.listdir(self._classesPath()):\n",
    "            self.defaults[className + \"WordMatrix\"] = []\n",
    "            self.defaults[className + \"IndexMatrix\"] = []\n",
    "            self.defaults[className + \"WordContainer\"] = []\n",
    "            self.defaults[className + \"WordDictionary\"] = {}\n",
    "        self.__dict__.update(self.defaults)\n",
    "\n",
    "    def setWordsContainer(self):\n",
    "        \"\"\"Tokenise every training file, filling the per-class word\n",
    "        matrices and the global vocabulary.\"\"\"\n",
    "        # Load the stop-word list and build the tokenizer once; the\n",
    "        # original re-read stopwords.txt for every single training file.\n",
    "        cutter = Cut(GetStopWrods().Call(self.STOP_WORDS_PATH))\n",
    "        classesPath = self._classesPath()\n",
    "        for className in os.listdir(classesPath):\n",
    "            classPath = classesPath + className + \"/\"\n",
    "            self.classPaths.append(classPath)\n",
    "            classWords = set(self.__dict__[className + \"WordContainer\"])\n",
    "            for fileName in os.listdir(classPath):\n",
    "                wordsList = cutter.Call(classPath + fileName)\n",
    "                self.__dict__[className + \"WordMatrix\"].append(wordsList)\n",
    "                classWords.update(wordsList)\n",
    "            self.__dict__[className + \"WordContainer\"] = list(classWords)\n",
    "            self.wordsContainer.extend(classWords)\n",
    "        # Deduplicate once at the end; the original rebuilt a set after\n",
    "        # every file (quadratic in corpus size).\n",
    "        self.wordsContainer = list(set(self.wordsContainer))\n",
    "        print(str(len(self.wordsContainer)) + \"\\n\")\n",
    "\n",
    "    def setWordsDictionary(self):\n",
    "        \"\"\"Map each word to its position in the global and per-class\n",
    "        containers so documents can be encoded as index vectors.\"\"\"\n",
    "        print(\"WordsDictionary Set begin.\" + \"\\n\")\n",
    "        for i, word in enumerate(self.wordsContainer):\n",
    "            self.wordsDictionary[word] = i\n",
    "        for className in os.listdir(self._classesPath()):\n",
    "            classDictionary = self.__dict__[className + \"WordDictionary\"]\n",
    "            for i, word in enumerate(self.__dict__[className + \"WordContainer\"]):\n",
    "                classDictionary[word] = i\n",
    "        print(\"WordsDictionary Set end.\" + \"\\n\")\n",
    "\n",
    "    def setIndexMatrix(self):\n",
    "        \"\"\"Encode every training document as a list of global word indices.\"\"\"\n",
    "        print(\"IndexMatrix Set begin.\" + \"\\n\")\n",
    "        for className in os.listdir(self._classesPath()):\n",
    "            # `wordList` replaces the original loop variable `list`,\n",
    "            # which shadowed the built-in type.\n",
    "            for wordList in self.__dict__[className + \"WordMatrix\"]:\n",
    "                indexRow = [self.wordsDictionary[word] for word in wordList]\n",
    "                self.__dict__[className + \"IndexMatrix\"].append(indexRow)\n",
    "        print(\"IndexMatrix Set end.\" + \"\\n\")\n",
    "\n",
    "    def train(self):\n",
    "        \"\"\"Run the full training pipeline.\"\"\"\n",
    "        self.setWordsContainer()\n",
    "        self.setWordsDictionary()\n",
    "        self.setIndexMatrix()\n",
    "\n",
    "    def predict(self, filepath, kNumber):\n",
    "        \"\"\"Classify the document at `filepath` by majority vote among its\n",
    "        kNumber nearest training documents.\n",
    "\n",
    "        Returns (winning class name, fraction of the k votes it won).\n",
    "        \"\"\"\n",
    "        print(\"Predict begin.\" + \"\\n\")\n",
    "        wordsList = Cut(GetStopWrods().Call(self.STOP_WORDS_PATH)).Call(filepath)\n",
    "        classesList = os.listdir(self._classesPath())\n",
    "        indexList = [0] * len(self.wordsDictionary)\n",
    "        for word in wordsList:\n",
    "            # Skip out-of-vocabulary words; the original raised KeyError\n",
    "            # on any word absent from the training set.\n",
    "            wordIndex = self.wordsDictionary.get(word)\n",
    "            if wordIndex is not None:\n",
    "                indexList[wordIndex] = 1\n",
    "        distanceContainer = []\n",
    "        tmpIndexList = [0] * len(self.wordsDictionary)\n",
    "        for className in classesList:\n",
    "            for docIndexes in self.__dict__[className + \"IndexMatrix\"]:\n",
    "                for wordIndex in docIndexes:\n",
    "                    tmpIndexList[wordIndex] = 1\n",
    "                distanceContainer.append([computeDistance(tmpIndexList, indexList), className])\n",
    "                # Clear only the positions just set instead of wiping the\n",
    "                # whole vocabulary-sized vector for every document.\n",
    "                for wordIndex in docIndexes:\n",
    "                    tmpIndexList[wordIndex] = 0\n",
    "        distanceContainer.sort(key = takeFirst)\n",
    "        # Slicing tolerates kNumber > number of training documents; the\n",
    "        # original indexed past the end and raised IndexError.\n",
    "        votingDictionary = {className: 0 for className in classesList}\n",
    "        for distance, className in distanceContainer[:kNumber]:\n",
    "            votingDictionary[className] += 1\n",
    "        maxClass = \"\"\n",
    "        maxNumber = 0.0\n",
    "        for className, votingNumber in votingDictionary.items():\n",
    "            if votingNumber > maxNumber:\n",
    "                maxClass = className\n",
    "                maxNumber = votingNumber\n",
    "        print(\"Predict end.\" + \"\\n\")\n",
    "        return maxClass, maxNumber / kNumber"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "3a6dd0d5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "213738\n",
      "\n",
      "WordsDictionary Set begin.\n",
      "\n",
      "WordsDictionary Set end.\n",
      "\n",
      "IndexMatrix Set begin.\n",
      "\n",
      "IndexMatrix Set end.\n",
      "\n",
      "Predict begin.\n",
      "\n",
      "Predict end.\n",
      "\n",
      "rec.sport.hockey\n"
     ]
    }
   ],
   "source": [
    "# Train on the 20_newsgroups corpus, then classify the file \"52558\"\n",
    "# using the 100 nearest training documents.\n",
    "knnClassifier = KNN()\n",
    "knnClassifier.train()\n",
    "predClass, predProbability = knnClassifier.predict(\"52558\", 100)\n",
    "print(predClass)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "fe87d5b2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.17\n"
     ]
    }
   ],
   "source": [
    "# Fraction of the k = 100 neighbour votes won by the predicted class.\n",
    "print(predProbability)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1b132a81",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
