{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "下面的函数将单词图像列表输入预训练的 PHOCNet，输出每张图像对应的 PHOC 编码（逐行堆叠为二维数组）。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def net_output_for_word_image_list(\n",
    "    phocnet,\n",
    "    word_img_list,\n",
    "    min_img_width_height=-1,\n",
    "    input_layer=\"word_images\",\n",
    "    output_layer=\"sigmoid\",\n",
    "    print_frequency=1000,\n",
    "):\n",
    "    \"\"\"\n",
    "    Predict PHOCs from the given PHOCNet\n",
    "    @param phocnet: caffe.Net\n",
    "        A pretrained PHOCNet. The first layer of the PHOCNet must be an InputLayer\n",
    "        (no LMDB or MemoryDataLayers)\n",
    "    @param word_img_list: list of ndarrays\n",
    "        A list of word images for which to predict the PHOCs.\n",
    "        Every image in the list has to be a single channel gray-scale or binary\n",
    "        ndarray in the range from 0 (black) to 255 (white).\n",
    "    @param min_img_width_height: int\n",
    "        The minimum height or width of an image to be passed to the PHOCNet.\n",
    "        If an image in the word_img_list is smaller than the supplied number\n",
    "        it is automatically resized before processed by the CNN. Default: -1\n",
    "    @param input_layer: str\n",
    "        The name of the input layer blob. Default: word_images\n",
    "    @param output_layer: str\n",
    "        The name of the output layer blob. Default: sigmoid\n",
    "    @param print_frequency: int\n",
    "        Output is generated after this amount of images has been processed by\n",
    "        the PHOCNet.\n",
    "    @return: 2d ndarray\n",
    "        Row idx holds the flattened network output (predicted PHOC) for\n",
    "        word_img_list[idx].\n",
    "    \"\"\"\n",
    "    output = []\n",
    "    logger.info(\"Evaluating net...\")\n",
    "    for idx, word_img in enumerate(word_img_list):\n",
    "        # Invert and scale pixel values: 255 (white background) -> 0.0,\n",
    "        # 0 (black text) -> 1.0, i.e. (255 - img) / 255.\n",
    "        word_img = (255.0 - word_img.astype(np.float32)) / 255.0\n",
    "\n",
    "        # Upscale images whose smallest side is below the requested minimum.\n",
    "        # np.ceil guarantees the resized smallest side reaches at least\n",
    "        # min_img_width_height + 1 even under float rounding; plain int()\n",
    "        # truncation could land one pixel short of the intended minimum.\n",
    "        if np.amin(word_img.shape[:2]) < min_img_width_height:\n",
    "            scale = float(min_img_width_height + 1) / float(np.amin(word_img.shape[:2]))\n",
    "            new_shape = (\n",
    "                int(np.ceil(scale * word_img.shape[0])),\n",
    "                int(np.ceil(scale * word_img.shape[1])),\n",
    "            )\n",
    "            word_img = resize(image=word_img, output_shape=new_shape)\n",
    "\n",
    "        # Add batch and channel dimensions: (H, W) -> (1, 1, H, W).\n",
    "        word_img = word_img.reshape((1, 1) + word_img.shape).astype(np.float32)\n",
    "\n",
    "        # Reshape the PHOCNet input blob to match the current image, then\n",
    "        # propagate the new shapes through the whole net.\n",
    "        phocnet.blobs[input_layer].reshape(*word_img.shape)\n",
    "        phocnet.reshape()\n",
    "\n",
    "        # Forward the word image through the PHOCNet and collect the\n",
    "        # flattened output vector.\n",
    "        phocnet.blobs[input_layer].data[...] = word_img\n",
    "        output.append(phocnet.forward()[output_layer].flatten())\n",
    "\n",
    "        # Progress logging every print_frequency images and at the end.\n",
    "        if (idx + 1) % print_frequency == 0 or (idx + 1) == len(word_img_list):\n",
    "            logger.debug(\n",
    "                \"    [ %*d / %d ]\",\n",
    "                len(str(len(word_img_list))),\n",
    "                idx + 1,\n",
    "                len(word_img_list),\n",
    "            )\n",
    "    return np.vstack(output)\n"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
