{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Readme\n",
    "PHOC text feature extraction module"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Dependencies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List, Dict, Optional, Tuple, Union\n",
    "import logging\n",
    "import numpy as np\n",
    "import tqdm"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Definitions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class PHOCGenerator:\n",
    "    '''\n",
    "    Builds Pyramidal Histogram of Characters (PHOC) descriptors (Almazan et al. 2014).\n",
    "\n",
    "    @param phoc_unigrams: (list of str)\n",
    "        Unigram alphabet; the position in this list is the feature index.\n",
    "    @param unigram_levels: (list of int)\n",
    "        Pyramid levels used for the unigram part of the descriptor.\n",
    "    @param bigram_levels: (list of int or None)\n",
    "        Pyramid levels used for the optional bigram part.\n",
    "    @param phoc_bigrams: (dict str -> int, or None)\n",
    "        Maps each known bigram to its feature index; None disables bigrams.\n",
    "    @param split_character: (str or None)\n",
    "        If given, words are split on this character and every token is\n",
    "        treated as one 'unigram' (for multi-character alphabets).\n",
    "    @param on_unknown_unigram: (str)\n",
    "        'error', 'warn' or 'nothing' -- reaction to unigrams missing from\n",
    "        phoc_unigrams.\n",
    "    @param phoc_type: (str)\n",
    "        'phoc' (binary occupancy) or 'spoc' (occurrence counts).\n",
    "    '''\n",
    "\n",
    "    def __init__(self, phoc_unigrams, unigram_levels, bigram_levels=None, phoc_bigrams=None, split_character=None, on_unknown_unigram='nothing', phoc_type='phoc'):\n",
    "        # Validate first so an instance with a bad configuration is never built.\n",
    "        if on_unknown_unigram not in ['error', 'warn', 'nothing']:\n",
    "            raise ValueError(f\"I don't know the on_unknown_unigram parameter '{on_unknown_unigram}'\")\n",
    "        self.phoc_unigrams = phoc_unigrams\n",
    "        self.unigram_levels = unigram_levels\n",
    "        self.bigram_levels = bigram_levels\n",
    "        self.phoc_bigrams = phoc_bigrams\n",
    "        self.split_character = split_character\n",
    "        self.on_unknown_unigram = on_unknown_unigram\n",
    "        self.phoc_type = phoc_type\n",
    "        self.logger = logging.getLogger('PHOCGenerator')\n",
    "\n",
    "    @staticmethod\n",
    "    def get_unigrams_from_strings(word_strings, split_character=None):\n",
    "        '''\n",
    "        Collects the sorted set of unigrams occurring in a list of words.\n",
    "\n",
    "        @param word_strings: (list of str)\n",
    "            Words to extract the unigrams from.\n",
    "        @param split_character: (str or None)\n",
    "            If given, words are split on this character and each token\n",
    "            counts as one unigram; otherwise single characters are used.\n",
    "        @return: Sorted list of the unique unigrams.\n",
    "        '''\n",
    "        if split_character is not None:\n",
    "            unigrams = [elem for word_string in word_strings for elem in word_string.split(split_character)]\n",
    "        else:\n",
    "            unigrams = [elem for word_string in word_strings for elem in word_string]\n",
    "        return sorted(set(unigrams))\n",
    "\n",
    "    @staticmethod\n",
    "    def get_n_grams(word, len_ngram):\n",
    "        '''\n",
    "        Calculates list of ngrams for a given word.\n",
    "\n",
    "        @param word: (str)\n",
    "            Word to calculate ngrams for.\n",
    "        @param len_ngram: (int)\n",
    "            Size of the extracted ngrams.\n",
    "        @return: List of ngrams as strings.\n",
    "        '''\n",
    "        return [word[i:i + len_ngram] for i in range(len(word) - len_ngram + 1)]\n",
    "\n",
    "    def get_most_common_n_grams(self, words, num_results=50, len_ngram=2):\n",
    "        '''\n",
    "        Determines the most frequent ngrams in a corpus of words.\n",
    "\n",
    "        @param words: (list of str)\n",
    "            Words to count the ngrams in.\n",
    "        @param num_results: (int)\n",
    "            Number of top ngrams to keep.\n",
    "        @param len_ngram: (int)\n",
    "            Size of the counted ngrams.\n",
    "        @return: Dict mapping each kept ngram to its rank (0 = most common).\n",
    "        '''\n",
    "        ngrams = {}\n",
    "        for word in words:\n",
    "            for ngram in self.get_n_grams(word, len_ngram):\n",
    "                ngrams[ngram] = ngrams.get(ngram, 0) + 1\n",
    "        sorted_list = sorted(ngrams.items(), key=lambda x: x[1], reverse=True)\n",
    "        return {k: i for i, (k, _) in enumerate(sorted_list[:num_results])}\n",
    "\n",
    "    def build_phoc_descriptor(self, words):\n",
    "        '''\n",
    "        Calculate Pyramidal Histogram of Characters (PHOC) descriptor (see Almazan 2014).\n",
    "\n",
    "        @param words: (list of str)\n",
    "            Words to calculate the descriptors for.\n",
    "        @return: np.ndarray of shape (len(words), phoc_size) -- the PHOCs.\n",
    "        @raise ValueError: on an unknown unigram when on_unknown_unigram\n",
    "            is 'error', or on an unknown phoc_type.\n",
    "        '''\n",
    "        # Prepare output matrix\n",
    "        phoc_size = len(self.phoc_unigrams) * np.sum(self.unigram_levels)\n",
    "        if self.phoc_bigrams is not None:\n",
    "            phoc_size += len(self.phoc_bigrams) * np.sum(self.bigram_levels)\n",
    "        phocs = np.zeros((len(words), phoc_size))\n",
    "\n",
    "        # Prepare some lambda functions\n",
    "        occupancy = lambda k, n: [float(k) / n, float(k + 1) / n]\n",
    "        overlap = lambda a, b: [max(a[0], b[0]), min(a[1], b[1])]\n",
    "        size = lambda region: region[1] - region[0]\n",
    "        # Bigrams span two positions, hence the k + 2 upper bound.\n",
    "        ngram_occupancy = lambda k, n: [float(k) / n, float(k + 2) / n]\n",
    "\n",
    "        # Map from character to alphabet position\n",
    "        char_indices = {d: i for i, d in enumerate(self.phoc_unigrams)}\n",
    "\n",
    "        # Feature offset of each pyramid level, hoisted out of the word loop.\n",
    "        unigram_offsets = {level: sum(l for l in self.unigram_levels if l < level) * len(self.phoc_unigrams) for level in self.unigram_levels}\n",
    "        if self.phoc_bigrams is not None:\n",
    "            bigram_offsets = {level: sum(l for l in self.bigram_levels if l < level) * len(self.phoc_bigrams) for level in self.bigram_levels}\n",
    "\n",
    "        # Iterate through all the words\n",
    "        for word_index, word in enumerate(tqdm.tqdm(words)):\n",
    "            if self.split_character is not None:\n",
    "                word = word.split(self.split_character)\n",
    "\n",
    "            n = len(word)\n",
    "            for index, char in enumerate(word):\n",
    "                char_occ = occupancy(index, n)\n",
    "                if char not in char_indices:\n",
    "                    if self.on_unknown_unigram == 'warn':\n",
    "                        # Logger.warn is deprecated; warning() is the supported spelling.\n",
    "                        self.logger.warning(\"The unigram '%s' is unknown, skipping this character\", char)\n",
    "                        continue\n",
    "                    elif self.on_unknown_unigram == 'error':\n",
    "                        self.logger.critical(\"The unigram '%s' is unknown\", char)\n",
    "                        raise ValueError(f\"The unigram '{char}' is unknown\")\n",
    "                    else:\n",
    "                        continue\n",
    "                char_index = char_indices[char]\n",
    "                for level in self.unigram_levels:\n",
    "                    for region in range(level):\n",
    "                        region_occ = occupancy(region, level)\n",
    "                        # A character contributes to a region when at least half of its extent overlaps it.\n",
    "                        if size(overlap(char_occ, region_occ)) / size(char_occ) >= 0.5:\n",
    "                            feat_vec_index = unigram_offsets[level] + region * len(self.phoc_unigrams) + char_index\n",
    "                            if self.phoc_type == 'phoc':\n",
    "                                phocs[word_index, feat_vec_index] = 1\n",
    "                            elif self.phoc_type == 'spoc':\n",
    "                                phocs[word_index, feat_vec_index] += 1\n",
    "                            else:\n",
    "                                raise ValueError(\"The phoc_type '%s' is unknown\" % self.phoc_type)\n",
    "\n",
    "            # Add bigrams\n",
    "            if self.phoc_bigrams is not None:\n",
    "                ngram_features = np.zeros(len(self.phoc_bigrams) * np.sum(self.bigram_levels))\n",
    "                for i in range(n - 1):\n",
    "                    ngram_str = ''.join(word[i:i + 2])  # word may be a list after splitting\n",
    "                    # Membership test, not value test: a valid bigram may map to feature index 0.\n",
    "                    if ngram_str not in self.phoc_bigrams:\n",
    "                        continue\n",
    "                    occ = ngram_occupancy(i, n)\n",
    "                    for level in self.bigram_levels:\n",
    "                        for region in range(level):\n",
    "                            region_occ = occupancy(region, level)\n",
    "                            if size(overlap(occ, region_occ)) / size(occ) >= 0.5:\n",
    "                                # Offset by level so multiple bigram levels do not collide.\n",
    "                                feat_vec_index = bigram_offsets[level] + region * len(self.phoc_bigrams) + self.phoc_bigrams[ngram_str]\n",
    "                                if self.phoc_type == 'phoc':\n",
    "                                    ngram_features[feat_vec_index] = 1\n",
    "                                elif self.phoc_type == 'spoc':\n",
    "                                    ngram_features[feat_vec_index] += 1\n",
    "                                else:\n",
    "                                    raise ValueError(\"The phoc_type '%s' is unknown\" % self.phoc_type)\n",
    "                # The bigram block occupies the trailing columns of the descriptor.\n",
    "                phocs[word_index, -ngram_features.shape[0]:] = ngram_features\n",
    "        return phocs\n",
    "\n",
    "    def build_correlated_phoc(self, words, n_levels):\n",
    "        '''\n",
    "        Builds PHOCs where each split is correlated with its parent-level split.\n",
    "\n",
    "        @param words: (list of str)\n",
    "            Words to calculate the correlated descriptors for.\n",
    "        @param n_levels: (int)\n",
    "            Number of pyramid levels assumed, i.e. levels 2**0 .. 2**(n_levels - 1).\n",
    "        @return: np.ndarray with the correlated splits stacked horizontally.\n",
    "        '''\n",
    "        phoc_unigram_levels = [2 ** i for i in range(n_levels)]\n",
    "        phocs = self.build_phoc_descriptor(words=words).astype(np.uint8)\n",
    "        phoc_splits = np.split(ary=phocs, indices_or_sections=np.sum(phoc_unigram_levels), axis=1)\n",
    "        correlated_phocs = []\n",
    "        for cur_split_id in range(1, len(phoc_splits)):\n",
    "            father_id = cur_split_id // 2\n",
    "            # Child occupancy in bit 1, parent occupancy in bit 0.\n",
    "            cur_correlated_phoc = np.bitwise_or(phoc_splits[cur_split_id] * 2, phoc_splits[father_id])\n",
    "            correlated_phocs.append(cur_correlated_phoc)\n",
    "        return np.hstack(correlated_phocs)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Test"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Mock Test Case Definitions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sample test data\n",
    "words = ['hello', 'world', 'hello world']  # Ensure this is a list of strings\n",
    "phoc_unigrams = ['h', 'e', 'l', 'o', 'w', 'r', 'd']\n",
    "unigram_levels = [1, 2]  # Example levels\n",
    "bigram_levels = [1]  # Example levels\n",
    "phoc_bigrams = {'he': 0, 'el': 1, 'lo': 2}  # Example bigrams\n",
    "# Use None so every single character counts as a unigram. With ' ' each\n",
    "# space-separated token (e.g. 'hello') would be treated as one 'unigram',\n",
    "# none of which exist in the single-character alphabet above, leaving the\n",
    "# unigram part of every descriptor all-zero.\n",
    "split_character = None\n",
    "on_unknown_unigram = 'warn'  # the space in 'hello world' exercises the warn path\n",
    "phoc_type = 'phoc'\n",
    "\n",
    "# Initialize PHOCGenerator instance\n",
    "phoc_gen = PHOCGenerator(\n",
    "    phoc_unigrams=phoc_unigrams,\n",
    "    unigram_levels=unigram_levels,\n",
    "    bigram_levels=bigram_levels,\n",
    "    phoc_bigrams=phoc_bigrams,\n",
    "    split_character=split_character,\n",
    "    on_unknown_unigram=on_unknown_unigram,\n",
    "    phoc_type=phoc_type\n",
    ")\n",
    "\n",
    "def test_get_unigrams_from_strings():\n",
    "    print(\"Testing get_unigrams_from_strings...\")\n",
    "    result = PHOCGenerator.get_unigrams_from_strings(words, split_character)\n",
    "    assert set(result) >= set(phoc_unigrams), 'every alphabet character occurs in the corpus'\n",
    "    print(\"Unigrams:\", result)\n",
    "\n",
    "def test_get_most_common_n_grams():\n",
    "    print(\"Testing get_most_common_n_grams...\")\n",
    "    result = phoc_gen.get_most_common_n_grams(words, num_results=2, len_ngram=2)\n",
    "    assert len(result) == 2\n",
    "    print(\"Most common n-grams:\", result)\n",
    "\n",
    "def test_build_phoc_descriptor():\n",
    "    print(\"Testing build_phoc_descriptor...\")\n",
    "    result = phoc_gen.build_phoc_descriptor(words)\n",
    "    # 7 unigrams * (1 + 2) regions + 3 bigrams * 1 region = 24 features\n",
    "    assert result.shape == (len(words), 24)\n",
    "    assert result.any(), 'descriptor must not be all zeros'\n",
    "    print(\"PHOC Descriptor shape:\", result.shape)\n",
    "\n",
    "def test_build_correlated_phoc():\n",
    "    print(\"Testing build_correlated_phoc...\")\n",
    "    result = phoc_gen.build_correlated_phoc(words, n_levels=2)\n",
    "    assert result.shape[0] == len(words)\n",
    "    print(\"Correlated PHOC shape:\", result.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Test Run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Testing get_unigrams_from_strings...\n",
      "Unigrams: ['hello', 'world']\n",
      "Testing get_most_common_n_grams...\n",
      "Most common n-grams: {'he': 0, 'el': 1}\n",
      "Testing build_phoc_descriptor...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/3 [00:00<?, ?it/s]/tmp/ipykernel_80705/3675881896.py:79: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead\n",
      "  logger.warn('The unigram \\'%s\\' is unknown, skipping this character', char)\n",
      "WARNING:PHOCGenerator:The unigram 'hello' is unknown, skipping this character\n",
      "WARNING:PHOCGenerator:The unigram 'world' is unknown, skipping this character\n",
      "WARNING:PHOCGenerator:The unigram 'hello' is unknown, skipping this character\n",
      "WARNING:PHOCGenerator:The unigram 'world' is unknown, skipping this character\n",
      "100%|██████████| 3/3 [00:00<00:00, 959.94it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "PHOC Descriptor shape: (3, 24)\n",
      "Testing build_correlated_phoc...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/3 [00:00<?, ?it/s]WARNING:PHOCGenerator:The unigram 'hello' is unknown, skipping this character\n",
      "WARNING:PHOCGenerator:The unigram 'world' is unknown, skipping this character\n",
      "WARNING:PHOCGenerator:The unigram 'hello' is unknown, skipping this character\n",
      "WARNING:PHOCGenerator:The unigram 'world' is unknown, skipping this character\n",
      "100%|██████████| 3/3 [00:00<00:00, 892.60it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Correlated PHOC shape: (3, 16)\n",
      "All tests executed!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "logging.basicConfig(level=logging.INFO)\n",
    "\n",
    "test_get_unigrams_from_strings()\n",
    "test_get_most_common_n_grams()\n",
    "test_build_phoc_descriptor()\n",
    "test_build_correlated_phoc()\n",
    "\n",
    "print(\"All tests executed!\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Application"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): dead exploratory snippets kept for reference -- never executed here.\n",
    "# The first line would fail on Python 3: range objects do not support '+'.\n",
    "# unigrams = [chr(i) for i in range(ord('a'), ord('z') + 1) + range(ord('0'), ord('9') + 1)]\n",
    "# unigrams = get_unigrams_from_strings(word_strings=[elem[1] for elem in words])\n",
    "# unigrams = get_unigrams_from_strings(word_strings=[elem[1] for elem in words])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "science39",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
