{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 关联规则"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Platform : win32 [win32/linux]\n",
      "Systerm  : 3.6.10 (default, Mar  5 2020, 10:17:47) [MSC v.1900 64 bit (AMD64)] \n",
      "numpy  Version: 1.16.0\n"
     ]
    }
   ],
   "source": [
    "import os \n",
    "import sys\n",
    "import time \n",
    "import random\n",
    "import numpy as np \n",
    "from pprint import pprint\n",
    "np.set_printoptions(precision=3)   # 设置 numpy 显示位数\n",
    "print('Platform : {} [win32/linux]'.format(sys.platform))  # 当前平台信息 \n",
    "print('Systerm  : {} '.format(sys.version))\n",
    "print('numpy  Version: {}'.format(np.__version__))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Character-insertion operation: splice part of the target sequence into a noise sequence\n",
     "def insert_list(noise_list, used_list, l_insert_p=0.5, c_insert_p=0.9):\n",
     "    \"\"\"Randomly insert a contiguous slice of ``used_list`` into ``noise_list``.\n",
     "\n",
     "    :param noise_list:    noise sequence (mutated in place)\n",
     "    :param used_list:     target sequence to draw characters from (assumes len >= 5)\n",
     "    :param l_insert_p:    probability of performing any insertion at all\n",
     "    :param c_insert_p:    per-character probability of inserting a target character\n",
     "    :return:              the (possibly modified) noise_list\n",
     "    \"\"\"\n",
     "    # with probability l_insert_p, insert a slice of the target sequence\n",
     "    if random.random() < l_insert_p :\n",
     "        # start position of the slice taken from used_list\n",
     "        start = random.randint(0, len(used_list)-5)\n",
     "        # number of extra characters taken after the start position\n",
     "        num = random.randint(1, len(used_list) - start - 1)\n",
     "\n",
     "        # splice the slice into the noise sequence\n",
     "        insert_start = 0\n",
     "        for node_i in used_list[start: start+num+1]:\n",
     "            # each character is inserted with probability c_insert_p,\n",
     "            # at a random position strictly after the previous insertion point\n",
     "            if random.random() < c_insert_p:\n",
     "                # print(node_i)\n",
     "                insert_start = random.randint(1, len(noise_list) - insert_start) + insert_start\n",
     "                # print(insert_start)\n",
     "                # insert into the sequence (noise_list grows by one each time)\n",
     "                noise_list.insert(insert_start, node_i)\n",
     "\n",
     "    return noise_list\n",
    "\n",
    "\n",
     "# Simulate user click-behaviour sequences\n",
     "def generate_list(list_num=1000, char_num=10000, list_len=200, l_insert_p=0.5, c_insert_p=0.9):\n",
     "    \"\"\"Generate ``list_num`` noise sequences, some containing the target pattern.\n",
     "\n",
     "    :param list_num:   number of sequences to generate\n",
     "    :param char_num:   number of distinct click actions (characters)\n",
     "    :param list_len:   maximum sequence length\n",
     "    :param l_insert_p: probability of inserting the target sequence into a noise sequence\n",
     "    :param c_insert_p: per-character insertion probability\n",
     "    :return:           list of generated sequences\n",
     "    \"\"\"\n",
     "    random.seed(0)\n",
     "\n",
     "    # target sequence\n",
     "    used_list = ['o', 'A', 'B', 'C', 'D', 'E']\n",
     "\n",
     "    # generated test sequences\n",
     "    test_list = list()\n",
     "\n",
     "    for i in range(list_num):\n",
     "        # noise sequence: 5..list_len random character tokens\n",
     "        noise_list = [str(random.randint(1, char_num)) for _ in range(random.randint(5, list_len))]\n",
     "\n",
     "        test_list.append(insert_list(noise_list, used_list, l_insert_p=l_insert_p, c_insert_p=c_insert_p))\n",
     "        # print(noise_list)\n",
     "    return test_list\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "---\n",
    "### 前后顺序挖掘（不要求AB严格相邻.    A->B 远大于 B->A）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['49', '27', '3', 'A', '17', '33', '32', '26', '20', '31', '23', '38', '14', '33', 'B', '9', '19', '9', '49', 'C']\n",
      "['23', 'A', '28', '21', '40', '41', '14', '36', 'B', '31']\n",
      "['40', '32', 'A', '22', '16', '47', 'B']\n",
      "['6', '21', '33', '32', '7', 'o', '20', '36', 'A', 'B', 'C', 'D', 'E']\n",
      "['3', '40', '43', '17', '31', '5', '6', '44', '49', 'o', '9', 'A']\n",
      "['14', '44', '38', '27', '38', '18', 'o', '29', 'A', '32', '43', 'B', '42', 'C', 'D', '45', '23']\n",
      "['28', '4', '7', '10', '45', '15', '3', '37', '41', '35', '39', '44', '5', '2', 'o', 'B', 'C', '8', 'D']\n",
      "['47', 'A', 'B', '4', '44', '2', '35', '28', '40', 'C', '7', '17', 'D', '5', '15']\n",
      "['37', '11', '45', '44', '14', '50', '4', '44', '11', 'A', '11', '22', '34', '17', '8', '39', '29', '43', '12', '1', '31', 'B', 'C', 'D']\n",
      "['9', '16', 'A', '49', '31', '23', 'B', '40', '19', 'C', '44', '23', '38', '41', '40', 'D', '9', 'E']\n",
      "Counter({'A': 175, 'B': 155, 'C': 116, 'D': 87, 'o': 84, '44': 71, '10': 67, '48': 65, '41': 61, '31': 60, '26': 58, '13': 58, '27': 57, '23': 57, '49': 56, '6': 56, '9': 55, '39': 55, '1': 54, '47': 53, '7': 53, '50': 53, '25': 53, '3': 52, '5': 52, '17': 51, '28': 51, '35': 51, '46': 51, '45': 50, '2': 50, '19': 49, '22': 49, '15': 49, '11': 49, '33': 48, '20': 48, '8': 48, '29': 47, '34': 46, '21': 45, '16': 45, '14': 44, '42': 44, '38': 43, '37': 43, '32': 42, '40': 42, '12': 42, 'E': 40, '18': 40, '43': 39, '30': 39, '24': 38, '36': 37, '4': 33})\n",
      "char_index :  {'1': 0, '10': 1, '11': 2, '12': 3, '13': 4, '14': 5, '15': 6, '16': 7, '17': 8, '18': 9, '19': 10, '2': 11, '20': 12, '21': 13, '22': 14, '23': 15, '24': 16, '25': 17, '26': 18, '27': 19, '28': 20, '29': 21, '3': 22, '30': 23, '31': 24, '32': 25, '33': 26, '34': 27, '35': 28, '36': 29, '37': 30, '38': 31, '39': 32, '4': 33, '40': 34, '41': 35, '42': 36, '43': 37, '44': 38, '45': 39, '46': 40, '47': 41, '48': 42, '49': 43, '5': 44, '50': 45, '6': 46, '7': 47, '8': 48, '9': 49, 'A': 50, 'B': 51, 'C': 52, 'D': 53, 'E': 54, 'o': 55}\n",
      "index_char :  {0: '1', 1: '10', 2: '11', 3: '12', 4: '13', 5: '14', 6: '15', 7: '16', 8: '17', 9: '18', 10: '19', 11: '2', 12: '20', 13: '21', 14: '22', 15: '23', 16: '24', 17: '25', 18: '26', 19: '27', 20: '28', 21: '29', 22: '3', 23: '30', 24: '31', 25: '32', 26: '33', 27: '34', 28: '35', 29: '36', 30: '37', 31: '38', 32: '39', 33: '4', 34: '40', 35: '41', 36: '42', 37: '43', 38: '44', 39: '45', 40: '46', 41: '47', 42: '48', 43: '49', 44: '5', 45: '50', 46: '6', 47: '7', 48: '8', 49: '9', 50: 'A', 51: 'B', 52: 'C', 53: 'D', 54: 'E', 55: 'o'}\n"
     ]
    }
   ],
   "source": [
    "# 生成序列\n",
    "transactions = generate_list(list_num=200, char_num=50, list_len=20, l_insert_p=0.95, c_insert_p=0.9)\n",
    "[print(transactions_i) for transactions_i in transactions[:10]];\n",
    "\n",
    "from itertools import chain\n",
    "from collections import Counter\n",
    "# 各字符数量\n",
    "print(Counter(chain.from_iterable(transactions)))\n",
    "\n",
    "# 元素序号字典\n",
    "char_index_dict = {value:i  for i, value in enumerate(sorted(set(chain.from_iterable(transactions))))}\n",
    "# 序号元素字典\n",
    "index_char_dict = {v:k for k, v in char_index_dict.items()}\n",
    "    \n",
    "print('char_index : ', char_index_dict)\n",
    "print('index_char : ', index_char_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "199\n",
      " 10  ->  C    0.947   37 \n",
      "  A  ->  B    1.000   142 \n",
      "  A  ->  C    1.000   105 \n",
      "  A  ->  D    1.000   77 \n",
      "  A  ->  E    1.000   36 \n",
      "  B  ->  C    1.000   99 \n",
      "  B  ->  D    1.000   73 \n",
      "  B  ->  E    1.000   33 \n",
      "  C  ->  D    1.000   80 \n",
      "  C  ->  E    1.000   38 \n",
      "  D  ->  E    1.000   39 \n",
      "  o  ->  A    1.000   73 \n",
      "  o  ->  B    1.000   58 \n",
      "  o  ->  C    1.000   47 \n",
      "  o  ->  D    1.000   36 \n"
     ]
    }
   ],
   "source": [
    "# 共现矩阵\n",
    "co_occurrence_array  = np.zeros((len(char_index_dict), len(char_index_dict)), dtype=np.float16)\n",
    "\n",
    "for i, line_i in enumerate(transactions):\n",
    "    print('\\r{}'.format(i), end='')\n",
    "\n",
    "    for start, char_i in enumerate(line_i):\n",
    "        # print(char_i, line_i[start+1:] )\n",
    "\n",
    "        if len(line_i[start+1:]) > 0:\n",
    "            for char_j in line_i[start+1:]:\n",
    "                # print(char_i, char_j)\n",
    "                # 共现矩阵 +1\n",
    "                co_occurrence_array[char_index_dict[char_i]][char_index_dict[char_j]] += 1\n",
    "\n",
    "diff = (co_occurrence_array - co_occurrence_array.T) / (1E-5+co_occurrence_array.T + co_occurrence_array)\n",
    "# print(diff)\n",
    "\n",
    "print('')\n",
    "# 符合条件的坐标\n",
    "for (x, y) in zip(*np.where((diff > 0.93) & (co_occurrence_array > 30))):\n",
    "    print('{:^5s}->{:^5s}  {:.3f}   {:.0f} '.format(index_char_dict[x], index_char_dict[y], diff[x][y], co_occurrence_array[x][y]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "forward_d = dict()\n",
    "backward_d = dict()\n",
    "for (x, y) in zip(*np.where((diff > 0.93) & (co_occurrence_array > 30))):\n",
    "    \n",
    "    # 前向\n",
    "    if index_char_dict[x] not in forward_d:\n",
    "        forward_d[index_char_dict[x]] = {index_char_dict[y]: co_occurrence_array[x][y]}\n",
    "    else:\n",
    "        forward_d[index_char_dict[x]].update({index_char_dict[y]: co_occurrence_array[x][y]})\n",
    "        \n",
    "    # 后向\n",
    "    if index_char_dict[y] not in backward_d:\n",
    "        backward_d[index_char_dict[y]] = {index_char_dict[x]: co_occurrence_array[x][y]}\n",
    "    else:\n",
    "        backward_d[index_char_dict[y]].update({index_char_dict[x]: co_occurrence_array[x][y]})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Forward_D:\n",
      "{'10': {'C': 37.0},\n",
      " 'A': {'B': 142.0, 'C': 105.0, 'D': 77.0, 'E': 36.0},\n",
      " 'B': {'C': 99.0, 'D': 73.0, 'E': 33.0},\n",
      " 'C': {'D': 80.0, 'E': 38.0},\n",
      " 'D': {'E': 39.0},\n",
      " 'o': {'A': 73.0, 'B': 58.0, 'C': 47.0, 'D': 36.0}}\n",
      "Backward_D:\n",
      "{'A': {'o': 73.0},\n",
      " 'B': {'A': 142.0, 'o': 58.0},\n",
      " 'C': {'10': 37.0, 'A': 105.0, 'B': 99.0, 'o': 47.0},\n",
      " 'D': {'A': 77.0, 'B': 73.0, 'C': 80.0, 'o': 36.0},\n",
      " 'E': {'A': 36.0, 'B': 33.0, 'C': 38.0, 'D': 39.0}}\n"
     ]
    }
   ],
   "source": [
     "from pprint import pprint\n",
     "\n",
     "# Pretty-print the mined forward/backward rule maps\n",
     "print('Forward_D:')\n",
     "pprint(forward_d)\n",
     "\n",
     "print('Backward_D:')\n",
     "pprint(backward_d)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 生成推荐规则"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'10': {'C': 37.0},\n",
       " 'A': {'B': 142.0, 'C': 6.0, 'D': -76.0, 'E': -74.0},\n",
       " 'B': {'C': 99.0, 'D': -7.0, 'E': -44.0},\n",
       " 'C': {'D': 80.0, 'E': -1.0},\n",
       " 'D': {'E': 39.0},\n",
       " 'o': {'A': 73.0, 'B': -84.0, 'C': -157.0, 'D': -194.0}}"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dict_new = dict()\n",
    "\n",
    "for item_a, item_a_info in forward_d.items():\n",
    "    for item_b in item_a_info.keys():\n",
    "        \n",
    "        # 中间元素\n",
    "        item_m = forward_d[item_a].keys() & backward_d[item_b].keys()\n",
    "        # 中间元素数量\n",
    "        item_m_num = sum([backward_d[item_b][item_x]  for item_x in item_m])\n",
    "\n",
    "\n",
    "        if item_a not in dict_new:\n",
    "            dict_new[item_a] = {item_b: (forward_d[item_a][item_b]  - item_m_num)}\n",
    "        else:\n",
    "            dict_new[item_a].update({item_b: (forward_d[item_a][item_b]  - item_m_num)})\n",
    "\n",
    "dict_new"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "---\n",
    "### 前后顺序挖掘（要求AB严格相邻.    A->B 远大于 B->A）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Counter({'Bread': 4, 'Milk': 4, 'Diaper': 4, 'Beer': 3, 'Coke': 2, 'Eggs': 1})\n"
     ]
    }
   ],
   "source": [
     "# Classic market-basket example (strictly adjacent pairs this time)\n",
     "transactions = [\n",
     "                ['Bread', 'Milk'],\n",
     "                ['Bread', 'Diaper', 'Beer', 'Eggs'],\n",
     "                ['Milk', 'Diaper', 'Beer', 'Coke'],\n",
     "                ['Bread', 'Milk', 'Diaper', 'Beer'],\n",
     "                ['Bread', 'Milk', 'Diaper', 'Coke']\n",
     "            ]\n",
     "\n",
     "# frequency of each item\n",
     "print(Counter(chain.from_iterable(transactions)))\n",
     "\n",
     "# item -> index mapping\n",
     "char_index_dict = {value: i for i, value in enumerate(sorted(set(chain.from_iterable(transactions))))}\n",
     "# index -> item mapping\n",
     "index_char_dict = {v: k for k, v in char_index_dict.items()}\n",
     "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 共现矩阵\n",
    "co_occurrence_array = np.zeros((len(char_index_dict), len(char_index_dict)), dtype=np.float16)\n",
    "\n",
    "for i, line_i in enumerate(transactions):\n",
    "#     print('\\r{}'.format(i), end='')\n",
    "\n",
    "    for start, char_i in enumerate(line_i[:-1]):\n",
    "#         print(char_i, line_i[start + 1])\n",
    "\n",
    "        # 共现矩阵 +1\n",
    "        co_occurrence_array[char_index_dict[char_i]][char_index_dict[line_i[start + 1]]] += 1\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      " Bread  ->  Milk    1.000   3 \n",
      " Diaper ->  Beer    1.000   3 \n",
      "  Milk  -> Diaper   1.000   3 \n"
     ]
    }
   ],
   "source": [
     "# normalized asymmetry: close to 1 when i->j strongly dominates j->i\n",
     "diff = (co_occurrence_array - co_occurrence_array.T) / (1E-5 + co_occurrence_array.T + co_occurrence_array)\n",
     "\n",
     "print('')\n",
     "# coordinates that satisfy both thresholds (strong asymmetry, support >= 3)\n",
     "for (x, y) in zip(*np.where((diff > 0.93) & (co_occurrence_array >= 3))):\n",
     "    print('{:^8s}->{:^8s}  {:.3f}   {:.0f} '.format(index_char_dict[x], index_char_dict[y], diff[x][y],\n",
     "                                                    co_occurrence_array[x][y]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "---\n",
    "---\n",
    "### FP_Growth 查找频繁子集  \n",
    "https://github.com/blackAndrechen/data_mine"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm import tqdm\n",
    "\n",
     "class Node:\n",
     "    \"\"\"A single node of the FP-tree.\"\"\"\n",
     "    def __init__(self, node_name, count, parentNode):\n",
     "        self.name = node_name\n",
     "        self.count = count\n",
     "        self.nodeLink = None  # via nodeLink every tree node with the same name can be reached\n",
     "        self.parent = parentNode  # parent node\n",
     "        self.children = {}  # child nodes {node name: node object}\n",
    "\n",
     "class Fp_growth():\n",
     "    \"\"\"FP-Growth frequent-itemset and association-rule mining.\n",
     "\n",
     "    Adapted from https://github.com/blackAndrechen/data_mine\n",
     "    \"\"\"\n",
     "    def update_header(self, node, targetNode):  # append targetNode to the linked list of same-name nodes starting at headerTable's node\n",
     "        while node.nodeLink != None:\n",
     "            node = node.nodeLink\n",
     "        node.nodeLink = targetNode\n",
     "\n",
     "    def update_fptree(self, items, node, headerTable):  # insert one (filtered, sorted) transaction into the FP-tree\n",
     "        if items[0] in node.children:\n",
     "            # the first item is already a child node: just bump its count\n",
     "            node.children[items[0]].count += 1\n",
     "        else:\n",
     "            # create a new branch\n",
     "            node.children[items[0]] = Node(items[0], 1, node)\n",
     "            # append the new node to this item's header-table linked list\n",
     "            if headerTable[items[0]][1] == None:\n",
     "                headerTable[items[0]][1] = node.children[items[0]]\n",
     "            else:\n",
     "                self.update_header(headerTable[items[0]][1], node.children[items[0]])\n",
     "        # recurse on the remaining items\n",
     "        if len(items) > 1:\n",
     "            self.update_fptree(items[1:], node.children[items[0]], headerTable)\n",
     "\n",
     "    def create_fptree(self, data_set, min_support, flag=False):  # main tree-building routine\n",
     "        '''\n",
     "        Build an FP-tree from data_set.\n",
     "        header_table has the structure\n",
     "        {\"nodename\":[num,node],..}; node.nodelink reaches every tree node with that nodename\n",
     "        '''\n",
     "        item_count = {}  # occurrence count of each item\n",
     "        for t in data_set:  # first pass: count items for the frequent 1-itemsets\n",
     "            for item in t:\n",
     "                if item not in item_count:\n",
     "                    item_count[item] = 1\n",
     "                else:\n",
     "                    item_count[item] += 1\n",
     "        headerTable = {}\n",
     "        for k in item_count:  # drop items below the minimum support\n",
     "            if item_count[k] >= min_support:\n",
     "                headerTable[k] = item_count[k]\n",
     "\n",
     "        freqItemSet = set(headerTable.keys())  # frequent items meeting the minimum support\n",
     "        if len(freqItemSet) == 0:\n",
     "            return None, None\n",
     "        for k in headerTable:\n",
     "            headerTable[k] = [headerTable[k], None]  # element: [count, node]\n",
     "        tree_header = Node('head node', 1, None)\n",
     "        if flag:\n",
     "            ite = tqdm(data_set)\n",
     "        else:\n",
     "            ite = data_set\n",
     "        for t in ite:  # second pass: build the tree\n",
     "            localD = {}\n",
     "            for item in t:\n",
     "                if item in freqItemSet:  # keep only this sample's items that meet the minimum support\n",
     "                    localD[item] = headerTable[item][0]  # element : count\n",
     "            if len(localD) > 0:\n",
     "                # sort the sample by global frequency, descending\n",
     "                order_item = [v[0] for v in sorted(localD.items(), key=lambda x: x[1], reverse=True)]\n",
     "                # update the tree with the filtered, sorted sample\n",
     "                self.update_fptree(order_item, tree_header, headerTable)\n",
     "        return tree_header, headerTable\n",
     "\n",
     "    def find_path(self, node, nodepath):\n",
     "        '''\n",
     "        Recursively append the names of node's ancestors to nodepath.\n",
     "        '''\n",
     "        if node.parent != None:\n",
     "            nodepath.append(node.parent.name)\n",
     "            self.find_path(node.parent, nodepath)\n",
     "\n",
     "    def find_cond_pattern_base(self, node_name, headerTable):\n",
     "        '''\n",
     "        Find all conditional pattern bases for the given node name.\n",
     "        '''\n",
     "        treeNode = headerTable[node_name][1]\n",
     "        cond_pat_base = {}  # all conditional pattern bases\n",
     "        while treeNode != None:\n",
     "            nodepath = []\n",
     "            self.find_path(treeNode, nodepath)\n",
     "            if len(nodepath) > 1:\n",
     "                # nodepath[-1] is the artificial 'head node', so it is dropped\n",
     "                cond_pat_base[frozenset(nodepath[:-1])] = treeNode.count\n",
     "            treeNode = treeNode.nodeLink\n",
     "        return cond_pat_base\n",
     "\n",
     "    def create_cond_fptree(self, headerTable, min_support, temp, freq_items, support_data):\n",
     "        # the initial frequent itemsets are the entries of headerTable\n",
     "        freqs = [v[0] for v in sorted(headerTable.items(), key=lambda p: p[1][0])]  # sort frequent items by total frequency\n",
     "        for freq in freqs:  # for each frequent item\n",
     "            freq_set = temp.copy()\n",
     "            freq_set.add(freq)\n",
     "            freq_items.add(frozenset(freq_set))\n",
     "            if frozenset(freq_set) not in support_data:  # is this frequent itemset already in support_data?\n",
     "                support_data[frozenset(freq_set)] = headerTable[freq][0]\n",
     "            else:\n",
     "                support_data[frozenset(freq_set)] += headerTable[freq][0]\n",
     "\n",
     "            cond_pat_base = self.find_cond_pattern_base(freq, headerTable)  # find all conditional pattern bases\n",
     "            cond_pat_dataset = []  # flatten the pattern-base dict into a list of transactions\n",
     "            for item in cond_pat_base:\n",
     "                item_temp = list(item)\n",
     "                item_temp.sort()\n",
     "                for i in range(cond_pat_base[item]):\n",
     "                    cond_pat_dataset.append(item_temp)\n",
     "            # build the conditional FP-tree\n",
     "            cond_tree, cur_headtable = self.create_fptree(cond_pat_dataset, min_support)\n",
     "            if cur_headtable != None:\n",
     "                self.create_cond_fptree(cur_headtable, min_support, freq_set, freq_items, support_data)  # recursively mine the conditional FP-tree\n",
     "\n",
     "    def generate_L(self, data_set, min_support):\n",
     "        freqItemSet = set()\n",
     "        support_data = {}\n",
     "        # build the FP-tree of the full data set\n",
     "        tree_header, headerTable = self.create_fptree(data_set, min_support, flag=True)\n",
     "        # mine frequent itemsets (with support counts) from each item's conditional FP-tree\n",
     "        self.create_cond_fptree(headerTable, min_support, set(), freqItemSet, support_data)\n",
     "\n",
     "        max_l = 0\n",
     "        for i in freqItemSet:  # bucket the frequent itemsets by size into L\n",
     "            if len(i) > max_l: max_l = len(i)\n",
     "        L = [set() for _ in range(max_l)]\n",
     "        for i in freqItemSet:\n",
     "            L[len(i) - 1].add(i)\n",
     "        for i in range(len(L)):\n",
     "            print(\"frequent item {}:{}\".format(i + 1, len(L[i])))\n",
     "        return L, support_data\n",
     "\n",
     "    def generate_R(self, data_set, min_support, min_conf):\n",
     "        # mine the frequent itemsets\n",
     "        L, support_data = self.generate_L(data_set, min_support)\n",
     "\n",
     "        # derive association rules from the frequent itemsets and the minimum confidence\n",
     "        rule_list = []\n",
     "        sub_set_list = []\n",
     "\n",
     "        # compute the confidence of every candidate rule\n",
     "        for i in range(0, len(L)):\n",
     "            print(i)\n",
     "\n",
     "            for freq_set in L[i]:\n",
     "                for sub_set in sub_set_list:\n",
     "\n",
     "                    if sub_set.issubset(\n",
     "                            freq_set) and freq_set - sub_set in support_data:  # and freq_set-sub_set in support_data\n",
     "                        conf = support_data[freq_set] / support_data[freq_set - sub_set]\n",
     "                        big_rule = (freq_set - sub_set, sub_set, conf)\n",
     "                        if conf >= min_conf and big_rule not in rule_list:\n",
     "                            print(freq_set - sub_set, \" => \", sub_set, \"conf: \", conf)\n",
     "                            rule_list.append(big_rule)\n",
     "                sub_set_list.append(freq_set)\n",
     "        rule_list = sorted(rule_list, key=lambda x: (x[2]), reverse=True)\n",
     "        return rule_list\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████████████████████████████████████████████████████████████████████████████| 6/6 [00:00<00:00, 6004.73it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frequent item 1:6\n",
      "frequent item 2:7\n",
      "frequent item 3:4\n",
      "frequent item 4:1\n",
      "[[['y', 'x'], 3],\n",
      " [['y', 'z'], 3],\n",
      " [['x', 't'], 3],\n",
      " [['y', 't'], 3],\n",
      " [['t', 'z'], 3],\n",
      " [['x', 's'], 3],\n",
      " [['x', 'z'], 3],\n",
      " [['y', 'x', 'z'], 3],\n",
      " [['y', 'x', 't'], 3],\n",
      " [['x', 't', 'z'], 3],\n",
      " [['y', 't', 'z'], 3],\n",
      " [['y', 'x', 't', 'z'], 3]]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
     "data_set = [['r', 'z', 'h', 'j', 'p'],\n",
     "            ['z', 'y', 'x', 'w', 'v', 'u', 't', 's'],\n",
     "            ['z'],\n",
     "            ['r', 'x', 'n', 'o', 's'],\n",
     "            ['y', 'r', 'x', 'z', 'q', 't', 'p'],\n",
     "            ['y', 'z', 'x', 'e', 'q', 's', 't', 'm']]\n",
     "\n",
     "fp = Fp_growth()\n",
     "\n",
     "min_support = 3  # minimum support (minimum occurrence count)\n",
     "# mine the frequent itemsets\n",
     "L, support_data = fp.generate_L(data_set, min_support)\n",
     "\n",
     "# print the frequent itemsets of size > 1, sorted by (support, size)\n",
     "support_items = [[list(k), v] for k, v in support_data.items() if len(k) > 1]\n",
     "support_items = sorted(support_items, key=lambda x: (x[1], len(x[0])))\n",
     "from pprint import pprint\n",
     "pprint(support_items)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 生成推荐规则"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "1\n",
      "frozenset({'y'})  =>  frozenset({'x'}) conf:  1.0\n",
      "frozenset({'x'})  =>  frozenset({'y'}) conf:  0.75\n",
      "frozenset({'t'})  =>  frozenset({'z'}) conf:  1.0\n",
      "frozenset({'s'})  =>  frozenset({'x'}) conf:  1.0\n",
      "frozenset({'x'})  =>  frozenset({'s'}) conf:  0.75\n",
      "frozenset({'t'})  =>  frozenset({'x'}) conf:  1.0\n",
      "frozenset({'x'})  =>  frozenset({'t'}) conf:  0.75\n",
      "frozenset({'x'})  =>  frozenset({'z'}) conf:  0.75\n",
      "frozenset({'y'})  =>  frozenset({'z'}) conf:  1.0\n",
      "frozenset({'y'})  =>  frozenset({'t'}) conf:  1.0\n",
      "frozenset({'t'})  =>  frozenset({'y'}) conf:  1.0\n",
      "2\n",
      "frozenset({'y', 'z'})  =>  frozenset({'t'}) conf:  1.0\n",
      "frozenset({'y', 't'})  =>  frozenset({'z'}) conf:  1.0\n",
      "frozenset({'t', 'z'})  =>  frozenset({'y'}) conf:  1.0\n",
      "frozenset({'y'})  =>  frozenset({'t', 'z'}) conf:  1.0\n",
      "frozenset({'t'})  =>  frozenset({'y', 'z'}) conf:  1.0\n",
      "frozenset({'t', 'z'})  =>  frozenset({'x'}) conf:  1.0\n",
      "frozenset({'x', 'z'})  =>  frozenset({'t'}) conf:  1.0\n",
      "frozenset({'x', 't'})  =>  frozenset({'z'}) conf:  1.0\n",
      "frozenset({'x'})  =>  frozenset({'t', 'z'}) conf:  0.75\n",
      "frozenset({'t'})  =>  frozenset({'x', 'z'}) conf:  1.0\n",
      "frozenset({'y', 't'})  =>  frozenset({'x'}) conf:  1.0\n",
      "frozenset({'y', 'x'})  =>  frozenset({'t'}) conf:  1.0\n",
      "frozenset({'x', 't'})  =>  frozenset({'y'}) conf:  1.0\n",
      "frozenset({'t'})  =>  frozenset({'y', 'x'}) conf:  1.0\n",
      "frozenset({'y'})  =>  frozenset({'x', 't'}) conf:  1.0\n",
      "frozenset({'x'})  =>  frozenset({'y', 't'}) conf:  0.75\n",
      "frozenset({'y', 'z'})  =>  frozenset({'x'}) conf:  1.0\n",
      "frozenset({'y', 'x'})  =>  frozenset({'z'}) conf:  1.0\n",
      "frozenset({'x', 'z'})  =>  frozenset({'y'}) conf:  1.0\n",
      "frozenset({'y'})  =>  frozenset({'x', 'z'}) conf:  1.0\n",
      "frozenset({'x'})  =>  frozenset({'y', 'z'}) conf:  0.75\n",
      "3\n",
      "frozenset({'y', 't', 'z'})  =>  frozenset({'x'}) conf:  1.0\n",
      "frozenset({'y', 'x', 'z'})  =>  frozenset({'t'}) conf:  1.0\n",
      "frozenset({'y', 'x', 't'})  =>  frozenset({'z'}) conf:  1.0\n",
      "frozenset({'z', 'x', 't'})  =>  frozenset({'y'}) conf:  1.0\n",
      "frozenset({'t', 'z'})  =>  frozenset({'y', 'x'}) conf:  1.0\n",
      "frozenset({'y', 'x'})  =>  frozenset({'t', 'z'}) conf:  1.0\n",
      "frozenset({'y', 'z'})  =>  frozenset({'x', 't'}) conf:  1.0\n",
      "frozenset({'y', 't'})  =>  frozenset({'x', 'z'}) conf:  1.0\n",
      "frozenset({'x', 't'})  =>  frozenset({'y', 'z'}) conf:  1.0\n",
      "frozenset({'x', 'z'})  =>  frozenset({'y', 't'}) conf:  1.0\n",
      "frozenset({'x'})  =>  frozenset({'y', 't', 'z'}) conf:  0.75\n",
      "frozenset({'y'})  =>  frozenset({'x', 't', 'z'}) conf:  1.0\n",
      "frozenset({'t'})  =>  frozenset({'y', 'x', 'z'}) conf:  1.0\n"
     ]
    }
   ],
   "source": [
     "\n",
     "min_conf = 0.7  # minimum confidence\n",
     "\n",
     "# derive association rules from the frequent itemsets and the minimum confidence\n",
     "# (same logic as Fp_growth.generate_R, inlined here to reuse L/support_data from above)\n",
     "rule_list = []\n",
     "sub_set_list = []\n",
     "\n",
     "# compute the confidence of every candidate rule\n",
     "for i in range(0, len(L)):\n",
     "    print(i)\n",
     "\n",
     "    for freq_set in L[i]:\n",
     "        for sub_set in sub_set_list:\n",
     "\n",
     "            if sub_set.issubset(\n",
     "                    freq_set) and freq_set - sub_set in support_data:  # and freq_set-sub_set in support_data\n",
     "                conf = support_data[freq_set] / support_data[freq_set - sub_set]\n",
     "                big_rule = (freq_set - sub_set, sub_set, conf)\n",
     "                if conf >= min_conf and big_rule not in rule_list:\n",
     "                    print(freq_set - sub_set, \" => \", sub_set, \"conf: \", conf)\n",
     "                    rule_list.append(big_rule)\n",
     "        sub_set_list.append(freq_set)\n",
     "rule_list = sorted(rule_list, key=lambda x: (x[2]), reverse=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PY36-tf12",
   "language": "python",
   "name": "py36_b"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
