{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Title: #Synonymous Sentences"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Difficulty: #Medium"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Category Title: #Algorithms"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Tag Slug: #union-find #array #hash-table #string #backtracking"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Name Translated: #并查集 #数组 #哈希表 #字符串 #回溯"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Solution Name: generateSentences"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Translated Title: #近义词句子"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Translated Content:\n",
    "<p>给你一个近义词表&nbsp;<code>synonyms</code> 和一个句子&nbsp;<code>text</code>&nbsp;，&nbsp;<code>synonyms</code> 表中是一些近义词对 ，你可以将句子&nbsp;<code>text</code> 中每个单词用它的近义词来替换。</p>\n",
    "\n",
    "<p>请你找出所有用近义词替换后的句子，按&nbsp;<strong>字典序排序</strong>&nbsp;后返回。</p>\n",
    "\n",
    "<p>&nbsp;</p>\n",
    "\n",
    "<p><strong>示例 1：</strong></p>\n",
    "\n",
    "<pre>\n",
    "<strong>输入：\n",
    "</strong>synonyms = [[&quot;happy&quot;,&quot;joy&quot;],[&quot;sad&quot;,&quot;sorrow&quot;],[&quot;joy&quot;,&quot;cheerful&quot;]],\n",
    "text = &quot;I am happy today but was sad yesterday&quot;\n",
    "<strong>输出：\n",
    "</strong>[&quot;I am cheerful today but was sad yesterday&quot;,\n",
    "&quot;I am cheerful today but was sorrow yesterday&quot;,\n",
    "&quot;I am happy today but was sad yesterday&quot;,\n",
    "&quot;I am happy today but was sorrow yesterday&quot;,\n",
    "&quot;I am joy today but was sad yesterday&quot;,\n",
    "&quot;I am joy today but was sorrow yesterday&quot;]\n",
    "</pre>\n",
    "\n",
    "<p>&nbsp;</p>\n",
    "\n",
    "<p><strong>提示：</strong></p>\n",
    "\n",
    "<ul>\n",
    "\t<li><code>0 &lt;=&nbsp;synonyms.length &lt;= 10</code></li>\n",
    "\t<li><code>synonyms[i].length == 2</code></li>\n",
     "\t<li><code>synonyms[i][0] != synonyms[i][1]</code></li>\n",
    "\t<li>所有单词仅包含英文字母，且长度最多为&nbsp;<code>10</code> 。</li>\n",
    "\t<li><code>text</code>&nbsp;最多包含&nbsp;<code>10</code> 个单词，且单词间用单个空格分隔开。</li>\n",
    "</ul>\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Description: [synonymous-sentences](https://leetcode.cn/problems/synonymous-sentences/description/)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Solutions: [synonymous-sentences](https://leetcode.cn/problems/synonymous-sentences/solutions/)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Each test case is one string: the synonyms list (as JSON) and the quoted\n",
    "# sentence text, separated by a literal newline character.\n",
    "test_cases = ['[[\"happy\",\"joy\"],[\"sad\",\"sorrow\"],[\"joy\",\"cheerful\"]]\\n\"I am happy today but was sad yesterday\"', '[[\"happy\",\"joy\"],[\"cheerful\",\"glad\"]]\\n\"I am happy today but was sad yesterday\"']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Build synonym groups via flood fill over the pair graph, then\n",
    "        # enumerate every sentence by treating the replaceable positions as\n",
    "        # digits of a mixed-radix counter.  Output comes out sorted because\n",
    "        # each position's candidate list is sorted and lower indices are the\n",
    "        # slow-moving digits.\n",
    "        text = text.split()\n",
    "        wordDict = collections.defaultdict(list)\n",
    "\n",
    "        # Undirected adjacency list of the synonym pairs.\n",
    "        for wordA, wordB in synonyms:\n",
    "            wordDict[wordA].append(wordB)\n",
    "            wordDict[wordB].append(wordA)\n",
    "        \n",
    "        # indexDict: position in `text` -> set of interchangeable words.\n",
    "        indexDict = collections.defaultdict(set)\n",
    "        for i in range(len(text)):\n",
    "            if text[i] in wordDict:\n",
    "                # Reuse an already-computed group when this word belongs to\n",
    "                # one.  The immediate `break` avoids mutating the dict while\n",
    "                # iterating over it.\n",
    "                for j in indexDict:\n",
    "                    if text[i] in indexDict[j]:\n",
    "                        indexDict[i] = indexDict[j]\n",
    "                        break\n",
    "                if not indexDict[i]:\n",
    "                    # DFS flood fill collecting the whole synonym group.\n",
    "                    stack = [text[i]]\n",
    "                    visited = {text[i]}\n",
    "                    while stack:\n",
    "                        cur = stack.pop()\n",
    "                        for nex in wordDict[cur]:\n",
    "                            if nex not in visited:\n",
    "                                visited.add(nex)\n",
    "                                stack.append(nex)\n",
    "                    indexDict[i] = visited\n",
    "\n",
    "        # No replaceable word: the original sentence is the only answer.\n",
    "        if not indexDict:\n",
    "            return [\" \".join(text)]\n",
    "\n",
    "        # Sort each group so the enumeration below yields sorted sentences.\n",
    "        for key in indexDict:\n",
    "            indexDict[key] = sorted(indexDict[key])\n",
    "        \n",
    "        stack = sorted(indexDict)\n",
    "        index = [0] * len(stack)\n",
    "        ans = []\n",
    "        # Odometer loop: emit the current combination, then increment the\n",
    "        # last digit with carry; stop once the counter wraps to all zeros.\n",
    "        while True:\n",
    "            for i in range(len(stack)):\n",
    "                text[stack[i]] = indexDict[stack[i]][index[i]]\n",
    "            ans.append(\" \".join(text))\n",
    "            i = len(index)-1\n",
    "            index[-1] += 1\n",
    "            while index[i] == len(indexDict[stack[i]]):\n",
    "                index[i] = 0\n",
    "                if i > 0:   \n",
    "                    index[i-1] += 1\n",
    "                    i -= 1\n",
    "            if sum(index) == 0: break\n",
    "        return ans"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this cell is an exact duplicate of the mixed-radix-counter\n",
    "# solution in the cell above; consider deleting one copy.\n",
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Group synonyms via DFS flood fill, then enumerate all sentences\n",
    "        # with a mixed-radix counter over the replaceable positions.\n",
    "        text = text.split()\n",
    "        wordDict = collections.defaultdict(list)\n",
    "\n",
    "        for wordA, wordB in synonyms:\n",
    "            wordDict[wordA].append(wordB)\n",
    "            wordDict[wordB].append(wordA)\n",
    "        \n",
    "        indexDict = collections.defaultdict(set)\n",
    "        for i in range(len(text)):\n",
    "            if text[i] in wordDict:\n",
    "                for j in indexDict:\n",
    "                    if text[i] in indexDict[j]:\n",
    "                        indexDict[i] = indexDict[j]\n",
    "                        break\n",
    "                if not indexDict[i]:\n",
    "                    stack = [text[i]]\n",
    "                    visited = {text[i]}\n",
    "                    while stack:\n",
    "                        cur = stack.pop()\n",
    "                        for nex in wordDict[cur]:\n",
    "                            if nex not in visited:\n",
    "                                visited.add(nex)\n",
    "                                stack.append(nex)\n",
    "                    indexDict[i] = visited\n",
    "\n",
    "        if not indexDict:\n",
    "            return [\" \".join(text)]\n",
    "\n",
    "        for key in indexDict:\n",
    "            indexDict[key] = sorted(indexDict[key])\n",
    "        \n",
    "        stack = sorted(indexDict)\n",
    "        index = [0] * len(stack)\n",
    "        ans = []\n",
    "        while True:\n",
    "            for i in range(len(stack)):\n",
    "                text[stack[i]] = indexDict[stack[i]][index[i]]\n",
    "            ans.append(\" \".join(text))\n",
    "            i = len(index)-1\n",
    "            index[-1] += 1\n",
    "            while index[i] == len(indexDict[stack[i]]):\n",
    "                index[i] = 0\n",
    "                if i > 0:   \n",
    "                    index[i-1] += 1\n",
    "                    i -= 1\n",
    "            if sum(index) == 0: break\n",
    "        return ans"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Recursive expansion: grow a list of candidate word-lists, one\n",
    "        # replaceable position at a time.  Sorting the partial paths on every\n",
    "        # call keeps the final result in lexicographic order.\n",
    "        # 1. Build the synonym hash table (undirected adjacency).\n",
    "        def create_dict(synonyms):\n",
    "            hb = {}\n",
    "            for _, syn in enumerate(synonyms):\n",
    "                w1, w2 = syn[0], syn[1]\n",
    "                if w1 not in hb:\n",
    "                    hb[w1] = [w2]\n",
    "                else:\n",
    "                    hb[w1].append(w2)\n",
    "                if w2 not in hb:\n",
    "                    hb[w2] = [w1]\n",
    "                else:\n",
    "                    hb[w2].append(w1)\n",
    "            return hb\n",
    "        \n",
    "        def get_cands(key, cands): # recursively collect every replacement for a word\n",
    "            if key not in hb:\n",
    "                return cands\n",
    "            \n",
    "            for val in hb[key]:\n",
    "                if val not in cands:\n",
    "                    cands.append(val)\n",
    "                    cands = get_cands(val, cands)\n",
    "            \n",
    "            return cands\n",
    "\n",
    "        def replace(cur, path): # cur marks the current scan position\n",
    "            path.sort()\n",
    "            while cur < size and words[cur] not in hb: # advance to the next replaceable word\n",
    "                cur += 1\n",
    "\n",
    "            if cur == size:\n",
    "                return path\n",
    "            \n",
    "            key = words[cur]\n",
    "            # Look up hb for every word interchangeable with `key`.\n",
    "            cands = get_cands(key, [key])\n",
    "            # cands.sort()\n",
    "            ppath = []\n",
    "            for p in path:\n",
    "                for cand in cands:\n",
    "                    temp = p.copy()\n",
    "                    temp[cur] = cand\n",
    "                    if temp not in path:\n",
    "                        ppath.append(temp)\n",
    "\n",
    "            path = replace(cur+1, path+ppath) \n",
    "            return path\n",
    "\n",
    "        hb = create_dict(synonyms)\n",
    "\n",
    "        # 2. Split the text and recursively expand each replaceable position.\n",
    "        words = text.split(' ')\n",
    "        size = len(words)\n",
    "        \n",
    "        res = replace(0, [words])\n",
    "        res = [' '.join(words) for words in res]\n",
    "        return res\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Variant of the recursive-expansion solution: instead of sorting the\n",
    "        # partial paths on every call, sort the final sentence strings once.\n",
    "        # 1. Build the synonym hash table (undirected adjacency).\n",
    "        def create_dict(synonyms):\n",
    "            hb = {}\n",
    "            for _, syn in enumerate(synonyms):\n",
    "                w1, w2 = syn[0], syn[1]\n",
    "                if w1 not in hb:\n",
    "                    hb[w1] = [w2]\n",
    "                else:\n",
    "                    hb[w1].append(w2)\n",
    "                if w2 not in hb:\n",
    "                    hb[w2] = [w1]\n",
    "                else:\n",
    "                    hb[w2].append(w1)\n",
    "            return hb\n",
    "        \n",
    "        def get_cands(key, cands): # recursively collect every replacement for a word\n",
    "            if key not in hb:\n",
    "                return cands\n",
    "            \n",
    "            for val in hb[key]:\n",
    "                if val not in cands:\n",
    "                    cands.append(val)\n",
    "                    cands = get_cands(val, cands)\n",
    "            \n",
    "            return cands\n",
    "\n",
    "        def replace(cur, path): # cur marks the current scan position\n",
    "            # path.sort()\n",
    "            while cur < size and words[cur] not in hb: # advance to the next replaceable word\n",
    "                cur += 1\n",
    "\n",
    "            if cur == size:\n",
    "                return path\n",
    "            \n",
    "            key = words[cur]\n",
    "            # Look up hb for every word interchangeable with `key`.\n",
    "            cands = get_cands(key, [key])\n",
    "            # cands.sort()\n",
    "            ppath = []\n",
    "            for p in path:\n",
    "                for cand in cands:\n",
    "                    temp = p.copy()\n",
    "                    temp[cur] = cand\n",
    "                    if temp not in path:\n",
    "                        ppath.append(temp)\n",
    "\n",
    "            path = replace(cur+1, path+ppath) \n",
    "            return path\n",
    "\n",
    "        hb = create_dict(synonyms)\n",
    "\n",
    "        # 2. Split the text and recursively expand each replaceable position.\n",
    "        words = text.split(' ')\n",
    "        size = len(words)\n",
    "        \n",
    "        res = replace(0, [words])\n",
    "        res = [' '.join(words) for words in res]\n",
    "        res.sort()\n",
    "        return res\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this cell is an exact duplicate of the mixed-radix-counter\n",
    "# solution earlier in the notebook; consider deleting one copy.\n",
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Group synonyms via DFS flood fill, then enumerate all sentences\n",
    "        # with a mixed-radix counter over the replaceable positions.\n",
    "        text = text.split()\n",
    "        wordDict = collections.defaultdict(list)\n",
    "\n",
    "        for wordA, wordB in synonyms:\n",
    "            wordDict[wordA].append(wordB)\n",
    "            wordDict[wordB].append(wordA)\n",
    "        \n",
    "        indexDict = collections.defaultdict(set)\n",
    "        for i in range(len(text)):\n",
    "            if text[i] in wordDict:\n",
    "                for j in indexDict:\n",
    "                    if text[i] in indexDict[j]:\n",
    "                        indexDict[i] = indexDict[j]\n",
    "                        break\n",
    "                if not indexDict[i]:\n",
    "                    stack = [text[i]]\n",
    "                    visited = {text[i]}\n",
    "                    while stack:\n",
    "                        cur = stack.pop()\n",
    "                        for nex in wordDict[cur]:\n",
    "                            if nex not in visited:\n",
    "                                visited.add(nex)\n",
    "                                stack.append(nex)\n",
    "                    indexDict[i] = visited\n",
    "\n",
    "        if not indexDict:\n",
    "            return [\" \".join(text)]\n",
    "\n",
    "        for key in indexDict:\n",
    "            indexDict[key] = sorted(indexDict[key])\n",
    "        \n",
    "        stack = sorted(indexDict)\n",
    "        index = [0] * len(stack)\n",
    "        ans = []\n",
    "        while True:\n",
    "            for i in range(len(stack)):\n",
    "                text[stack[i]] = indexDict[stack[i]][index[i]]\n",
    "            ans.append(\" \".join(text))\n",
    "            i = len(index)-1\n",
    "            index[-1] += 1\n",
    "            while index[i] == len(indexDict[stack[i]]):\n",
    "                index[i] = 0\n",
    "                if i > 0:   \n",
    "                    index[i-1] += 1\n",
    "                    i -= 1\n",
    "            if sum(index) == 0: break\n",
    "        return ans"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        \"\"\"Return every sentence obtainable by replacing words with their\n",
    "        synonyms (transitively), sorted lexicographically.\n",
    "\n",
    "        Fixed: the previous version chased chains through dict(synonyms),\n",
    "        which loops forever on cyclic pairs such as [[\"a\",\"b\"],[\"b\",\"a\"]]\n",
    "        and silently drops pairs whose first word repeats.  A union-find\n",
    "        with path compression handles both cases.\n",
    "        \"\"\"\n",
    "        parent = {}\n",
    "\n",
    "        def find(x):\n",
    "            # Path-compressing find; unseen words become their own root.\n",
    "            parent.setdefault(x, x)\n",
    "            if parent[x] != x:\n",
    "                parent[x] = find(parent[x])\n",
    "            return parent[x]\n",
    "\n",
    "        for u, v in synonyms:\n",
    "            ru, rv = find(u), find(v)\n",
    "            if ru != rv:\n",
    "                parent[ru] = rv\n",
    "\n",
    "        # Root word -> complete synonym group (the root includes itself).\n",
    "        groups = collections.defaultdict(set)\n",
    "        for w in parent:\n",
    "            groups[find(w)].add(w)\n",
    "\n",
    "        words = text.split()\n",
    "        res = []\n",
    "\n",
    "        def backtrack(i):\n",
    "            # Fix word i to each candidate in turn and recurse.\n",
    "            if i == len(words):\n",
    "                res.append(' '.join(words))\n",
    "                return\n",
    "            original = words[i]\n",
    "            candidates = groups[find(original)] if original in parent else {original}\n",
    "            for cand in candidates:\n",
    "                words[i] = cand\n",
    "                backtrack(i + 1)\n",
    "            words[i] = original\n",
    "\n",
    "        backtrack(0)\n",
    "        return sorted(res)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "# Disjoint-set union (union-find) keyed by word, with path compression.\n",
    "class Dsu:\n",
    "    def __init__(self):\n",
    "        # word -> parent word; roots point to themselves.\n",
    "        # Fixed: bare `defaultdict` raised NameError, since only the\n",
    "        # `collections` module is imported in this cell.\n",
    "        self.fa = collections.defaultdict(str)\n",
    "    def find(self,x):\n",
    "        if x not in self.fa:\n",
    "            self.fa[x] = x\n",
    "        if self.fa[x] != x:\n",
    "            self.fa[x] = self.find(self.fa[x])\n",
    "        return self.fa[x]\n",
    "    def merge(self,x,y):\n",
    "        Root_x = self.find(x)\n",
    "        Root_y = self.find(y)\n",
    "        if Root_x != Root_y:\n",
    "            self.fa[Root_x] = Root_y\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Backtracking: choose a synonym for each of the len(words) positions,\n",
    "        # starting from the first word.\n",
    "        words = text.split(' ')\n",
    "        res = set()\n",
    "        def dfs(i,path):\n",
    "            if i == len(words):\n",
    "                res.add(' '.join(path))\n",
    "                return\n",
    "            # Enumerate every candidate for the i-th word; a word in no\n",
    "            # synonym group falls back to the singleton {words[i]}.\n",
    "            for s in mp.get(dsu.find(words[i]), {words[i]}):\n",
    "                path.append(s)\n",
    "                dfs(i+1,path)\n",
    "                path.pop()\n",
    "        # Union the synonym pairs.\n",
    "        dsu = Dsu()\n",
    "        for x1,x2 in synonyms:\n",
    "            dsu.merge(x1,x2)\n",
    "\n",
    "        # Root word -> complete synonym group.\n",
    "        mp = collections.defaultdict(set)\n",
    "        for k in dsu.fa:\n",
    "            mp[dsu.find(k)].add(k)\n",
    "\n",
    "        dfs(0,[])\n",
    "        return sorted(res)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "# Disjoint-set union (union-find) keyed by word, with path compression.\n",
    "class Dsu:\n",
    "    def __init__(self):\n",
    "        # word -> parent word; roots point to themselves.\n",
    "        # Fixed: bare `defaultdict` raised NameError, since only the\n",
    "        # `collections` module is imported in this cell.\n",
    "        self.fa = collections.defaultdict(str)\n",
    "    def find(self,x):\n",
    "        if x not in self.fa:\n",
    "            self.fa[x] = x\n",
    "        if self.fa[x] != x:\n",
    "            self.fa[x] = self.find(self.fa[x])\n",
    "        return self.fa[x]\n",
    "    def merge(self,x,y):\n",
    "        Root_x = self.find(x)\n",
    "        Root_y = self.find(y)\n",
    "        if Root_x != Root_y:\n",
    "            self.fa[Root_x] = Root_y\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Backtracking: choose a synonym for each of the len(words) positions,\n",
    "        # starting from the first word.\n",
    "        words = text.split(' ')\n",
    "        res = set()\n",
    "        def dfs(i,path):\n",
    "            if i == len(words):\n",
    "                res.add(' '.join(path))\n",
    "                return\n",
    "            # Enumerate candidates for the i-th word; the union with\n",
    "            # {words[i]} covers words that belong to no synonym group.\n",
    "            for s in mp[dsu.find(words[i])]|{words[i]}:\n",
    "                path.append(s)\n",
    "                dfs(i+1,path)\n",
    "                path.pop()\n",
    "        # Union the synonym pairs.\n",
    "        dsu = Dsu()\n",
    "        for x1,x2 in synonyms:\n",
    "            dsu.merge(x1,x2)\n",
    "\n",
    "        # Root word -> complete synonym group.\n",
    "        mp = collections.defaultdict(set)\n",
    "        for k in dsu.fa:\n",
    "            mp[dsu.find(k)].add(k)\n",
    "\n",
    "        dfs(0,[])\n",
    "        return sorted(res)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Union-find over words, then a DFS that visits each position's\n",
    "        # candidates in sorted order, so `ans` is built already sorted.\n",
    "        # Fixed: bare `defaultdict` raised NameError -- only the\n",
    "        # `collections` module is imported in this cell.\n",
    "        p = collections.defaultdict(str)\n",
    "        for vec in synonyms:\n",
    "            for x in vec:\n",
    "                if x not in p:\n",
    "                    p[x] = x \n",
    "        for word in text.split():\n",
    "            if word not in p:\n",
    "                p[word] = word \n",
    "        \n",
    "        def find(x):\n",
    "            # Path-compressing find.\n",
    "            if x == p[x]:\n",
    "                return x \n",
    "            p[x] = find(p[x]) \n",
    "            return p[x] \n",
    "        def un(x,y):\n",
    "            fx, fy = find(x), find(y)\n",
    "            if fx == fy:\n",
    "                return False \n",
    "            p[fy] = fx \n",
    "            return True \n",
    "        for vec in synonyms:\n",
    "            m = len(vec) \n",
    "            for i in range(1,m):\n",
    "                un(p[vec[i-1]], p[vec[i]]) \n",
    "        # Root word -> complete group (text words included as singletons).\n",
    "        f = collections.defaultdict(set) \n",
    "        for vec in synonyms:\n",
    "            for x in vec:\n",
    "                fx = find(x) \n",
    "                f[fx].add(x) \n",
    "        for word in text.split():\n",
    "            fx = find(word)\n",
    "            f[fx].add(word) \n",
    "\n",
    "        words = text.split()\n",
    "        n= len(words) \n",
    "        ans = []\n",
    "        def dfs(i, s):\n",
    "            if i == n:\n",
    "                ans.append(s.strip(' ')) \n",
    "                return \n",
    "            for x in sorted(list(f[find(words[i])])):\n",
    "                dfs(i+1, s+' '+x) \n",
    "        dfs(0, '') \n",
    "        return ans "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Union-find over words, collect the groups into `setlist`, then DFS\n",
    "        # over sentence positions substituting every group member; sort at\n",
    "        # the end.\n",
    "        fa=dict()\n",
    "        def find(x):\n",
    "            # Path-compressing find; unseen words become their own root.\n",
    "            if x not in fa:\n",
    "                fa[x]=x\n",
    "            elif fa[x]!=x:\n",
    "                fa[x]=find(fa[x])\n",
    "            return fa[x]\n",
    "        def union(a,b):\n",
    "            if find(a)!=find(b):\n",
    "                fa[find(a)]=find(b)\n",
    "        for a,b in synonyms:\n",
    "            union(a,b)\n",
    "        ans=[]\n",
    "        c=collections.defaultdict(set)\n",
    "        for key in fa:\n",
    "            c[find(key)].add(key)\n",
    "        setlist=list(c.values())\n",
    "        ls=text.split(\" \")\n",
    "        n=len(ls)\n",
    "        def dfs(x):\n",
    "            if x==n:\n",
    "                ans.append(\" \".join(ls))\n",
    "                return \n",
    "            # NOTE(review): the local `find` flag shadows the outer find()\n",
    "            # function (unused inside dfs, so harmless), and the else branch\n",
    "            # relies on the for-loop variable `s` leaking out of the loop.\n",
    "            find=False\n",
    "            now=ls[x]\n",
    "            for s in setlist:\n",
    "                if now in s:\n",
    "                    find=True\n",
    "                    break\n",
    "            if not find:\n",
    "                dfs(x+1)\n",
    "            else:\n",
    "                for ch in s:\n",
    "                    ls[x]=ch\n",
    "                    dfs(x+1)\n",
    "        dfs(0)\n",
    "        ans.sort()\n",
    "        return ans "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Variant of the setlist DFS: the second dfs argument `l` counts the\n",
    "        # number of replaced words but is never used in the output.\n",
    "        fa=dict()\n",
    "        def find(x):\n",
    "            # Path-compressing find; unseen words become their own root.\n",
    "            if x not in fa:\n",
    "                fa[x]=x\n",
    "            elif fa[x]!=x:\n",
    "                fa[x]=find(fa[x])\n",
    "            return fa[x]\n",
    "        def union(a,b):\n",
    "            if find(a)!=find(b):\n",
    "                fa[find(a)]=find(b)\n",
    "        for a,b in synonyms:\n",
    "            union(a,b)\n",
    "        ans=[]\n",
    "        c=collections.defaultdict(set)\n",
    "        for key in fa:\n",
    "            c[find(key)].add(key)\n",
    "        setlist=list(c.values())\n",
    "        #print(s)\n",
    "        ls=text.split(\" \")\n",
    "        n=len(ls)\n",
    "        def dfs(x,l):\n",
    "            if x==n:\n",
    "                #print(ls)\n",
    "                #if l!=0:\n",
    "                ans.append(\" \".join(ls))\n",
    "                return \n",
    "            # NOTE(review): local `find` flag shadows the outer find()\n",
    "            # (unused here); the else branch relies on the for-loop variable\n",
    "            # `s` leaking out of the loop.\n",
    "            find=False\n",
    "            now=ls[x]\n",
    "            for s in setlist:\n",
    "                if now in s:\n",
    "                    find=True\n",
    "                    break\n",
    "            if not find:\n",
    "                dfs(x+1,l)\n",
    "            else:\n",
    "                for ch in s:\n",
    "                    ls[x]=ch\n",
    "                    if ch!=now:\n",
    "                        dfs(x+1,l+1)\n",
    "                    else:\n",
    "                        dfs(x+1,l)\n",
    "        dfs(0,0)\n",
    "        ans.sort()\n",
    "        return ans "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "# Disjoint-set union (union-find) keyed by word, with path compression.\n",
    "class Dsu:\n",
    "    def __init__(self):\n",
    "        # Fixed: bare `defaultdict` raised NameError -- only the\n",
    "        # `collections` module is imported in this cell.\n",
    "        self.fa = collections.defaultdict(str)\n",
    "    def find(self,x):\n",
    "        if x not in self.fa:\n",
    "            self.fa[x] = x\n",
    "        if self.fa[x] != x:\n",
    "            self.fa[x] = self.find(self.fa[x])\n",
    "        return self.fa[x]\n",
    "    def merge(self,x,y):\n",
    "        Root_x = self.find(x)\n",
    "        Root_y = self.find(y)\n",
    "        if Root_x != Root_y:\n",
    "            self.fa[Root_x] = Root_y\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        dsu = Dsu()\n",
    "        for x,y in synonyms:\n",
    "            dsu.merge(x,y)\n",
    "        # Group the synonyms: root word -> complete group.\n",
    "        words = text.split(' ')\n",
    "        mp = collections.defaultdict(set)\n",
    "        for k in dsu.fa:\n",
    "            mp[dsu.find(k)].add(k)\n",
    "        path = []\n",
    "        res = set()\n",
    "        # Backtracking over word positions.\n",
    "        def dfs(i,path):\n",
    "            # Pick a candidate for the i-th word and recurse.\n",
    "            if i == len(words):\n",
    "                res.add(' '.join(path))\n",
    "                return\n",
    "            for word in mp.get(dsu.find(words[i]),{words[i]}):\n",
    "                path.append(word)\n",
    "                dfs(i+1,path)\n",
    "                path.pop()\n",
    "        dfs(0,path)\n",
    "        return sorted(res)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Setlist DFS variant with the unused replacement counter removed\n",
    "        # (leftover commented-out code from that experiment remains below).\n",
    "        fa=dict()\n",
    "        def find(x):\n",
    "            # Path-compressing find; unseen words become their own root.\n",
    "            if x not in fa:\n",
    "                fa[x]=x\n",
    "            elif fa[x]!=x:\n",
    "                fa[x]=find(fa[x])\n",
    "            return fa[x]\n",
    "        def union(a,b):\n",
    "            if find(a)!=find(b):\n",
    "                fa[find(a)]=find(b)\n",
    "        for a,b in synonyms:\n",
    "            union(a,b)\n",
    "        ans=[]\n",
    "        c=collections.defaultdict(set)\n",
    "        for key in fa:\n",
    "            c[find(key)].add(key)\n",
    "        setlist=list(c.values())\n",
    "        #print(s)\n",
    "        ls=text.split(\" \")\n",
    "        n=len(ls)\n",
    "        def dfs(x):\n",
    "            if x==n:\n",
    "                #print(ls)\n",
    "                #if l!=0:\n",
    "                ans.append(\" \".join(ls))\n",
    "                return \n",
    "            # NOTE(review): local `find` flag shadows the outer find()\n",
    "            # (unused here); the else branch relies on the for-loop variable\n",
    "            # `s` leaking out of the loop.\n",
    "            find=False\n",
    "            now=ls[x]\n",
    "            for s in setlist:\n",
    "                if now in s:\n",
    "                    find=True\n",
    "                    break\n",
    "            if not find:\n",
    "                dfs(x+1)\n",
    "            else:\n",
    "                for ch in s:\n",
    "                    ls[x]=ch\n",
    "                    #if ch!=now:\n",
    "                    dfs(x+1)\n",
    "                    #else:\n",
    "                    #    dfs(x+1,l)\n",
    "        dfs(0)\n",
    "        ans.sort()\n",
    "        return ans "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "# Index-based union-find with union by rank and path compression.\n",
    "class setUnion(object):\n",
    "    def __init__(self,size):\n",
    "        self.parent=list(range(size))\n",
    "        self.rank=[1]*size\n",
    "        self.setCount=size\n",
    "    def find(self,i):\n",
    "        if self.parent[i]==i:\n",
    "            return i\n",
    "        else:\n",
    "            self.parent[i]=self.find(self.parent[i])\n",
    "            return self.parent[i]\n",
    "    def union(self,i,j):\n",
    "        x,y=self.find(i),self.find(j)\n",
    "        if x==y:\n",
    "            return\n",
    "        if self.rank[x]<self.rank[y]:\n",
    "            x,y=y,x\n",
    "        self.rank[x]+=self.rank[y]\n",
    "        self.parent[y]=x\n",
    "        self.setCount-=1\n",
    "    def is_connected(self,i,j):\n",
    "        return self.find(i)==self.find(j)\n",
    "    def get_partset(self):\n",
    "        # Root index -> list of member indices.\n",
    "        # Fixed: bare `defaultdict` raised NameError -- only the\n",
    "        # `collections` module is imported in this cell.\n",
    "        part=collections.defaultdict(list)\n",
    "        for i in range(len(self.parent)):\n",
    "            part[self.find(i)].append(i)\n",
    "        return part\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # Union-find phase: map every synonym word to an index and union pairs.\n",
    "        words=[]\n",
    "        for x in synonyms:\n",
    "            words.extend(x)\n",
    "        words=list(set(words))\n",
    "        ind={word:i for i,word in enumerate(words)}\n",
    "        n=len(words)\n",
    "        kkk=setUnion(n)\n",
    "        for word1,word2 in synonyms:\n",
    "            kkk.union(ind[word1],ind[word2])\n",
    "        part=kkk.get_partset()\n",
    "        # Sort each group by word so DFS emits sentences in sorted order.\n",
    "        for k in part:\n",
    "            part[k].sort(key= lambda x:words[x])\n",
    "        # Enumeration phase: brute-force DFS backtracking over positions.\n",
    "        text=text.split(\" \")\n",
    "        m,path,ans=len(text),[],[]\n",
    "        def dfs(i):\n",
    "            if i==m:\n",
    "                ans.append(\" \".join(path))\n",
    "                return \n",
    "            if text[i] not in ind:\n",
    "                path.append(text[i])\n",
    "                dfs(i+1)\n",
    "                path.pop()\n",
    "            else:\n",
    "                for j in part[kkk.find(ind[text[i]])]:\n",
    "                    path.append(words[j])\n",
    "                    dfs(i+1)\n",
    "                    path.pop()\n",
    "        dfs(0)\n",
    "        return ans"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        # 1.首先构建关于近义词得hash table\n",
    "        def create_dict(synonyms):\n",
    "            hb = {}\n",
    "            for _, syn in enumerate(synonyms):\n",
    "                w1, w2 = syn[0], syn[1]\n",
    "                if w1 not in hb:\n",
    "                    hb[w1] = [w2]\n",
    "                else:\n",
    "                    hb[w1].append(w2)\n",
    "                if w2 not in hb:\n",
    "                    hb[w2] = [w1]\n",
    "                else:\n",
    "                    hb[w2].append(w1)\n",
    "            return hb\n",
    "        \n",
    "        def get_cands(key, cands): # 递归查找某个词的所有替换词\n",
    "            if key not in hb:\n",
    "                return cands\n",
    "            \n",
    "            for val in hb[key]:\n",
    "                if val not in cands:\n",
    "                    cands.append(val)\n",
    "                    cands = get_cands(val, cands)\n",
    "            \n",
    "            return cands\n",
    "\n",
    "        def replace(cur, path): # cur标记当前遍历位置\n",
    "            # path.sort()\n",
    "            while cur < size and words[cur] not in hb: # 找到下一个需要替换得词\n",
    "                cur += 1\n",
    "\n",
    "            if cur == size:\n",
    "                return path\n",
    "            \n",
    "            key = words[cur]\n",
    "            # 查找hb, 找到 key 所有可替换的词\n",
    "            cands = get_cands(key, [key])\n",
    "            # cands.sort()\n",
    "            ppath = []\n",
    "            for p in path:\n",
    "                for cand in cands:\n",
    "                    temp = p.copy()\n",
    "                    temp[cur] = cand\n",
    "                    if temp not in path:\n",
    "                        ppath.append(temp)\n",
    "\n",
    "            path = replace(cur+1, path+ppath) \n",
    "            return path\n",
    "\n",
    "        hb = create_dict(synonyms)\n",
    "\n",
    "        # 2.拆分文本，依次查找hash table, 递归实现吧\n",
    "        words = text.split(' ')\n",
    "        size = len(words)\n",
    "        \n",
    "        res = replace(0, [words])\n",
    "        res = [' '.join(words) for words in res]\n",
    "        res.sort()\n",
    "        return res\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "import collections\n",
    "\n",
    "class Solution:\n",
    "    def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n",
    "        p=collections.defaultdict()\n",
    "        for x,y in synonyms:\n",
    "            p[x]=x\n",
    "            p[y]=y\n",
    "        def find(x):\n",
    "            if p[x]==x:\n",
    "                return p[x]\n",
    "            p[x]=find(p[x])\n",
    "            return p[x]\n",
    "        def union(x,y):\n",
    "            p[find(x)]=find(y)\n",
    "        for s,e in synonyms:\n",
    "            union(s,e)\n",
    "        newp=collections.defaultdict(set)\n",
    "        for x in p:\n",
    "            a=find(x)\n",
    "            newp[a].add(x)\n",
    "        arr=text.split(\" \")\n",
    "        ans=[]\n",
    "        n=len(arr)\n",
    "        def dfs(i,tmp):\n",
    "            nonlocal ans\n",
    "            if i==n:\n",
    "                ans.append(\" \".join(tmp))\n",
    "                return \n",
    "            tmp1=tmp.copy()\n",
    "            if arr[i] not in p:\n",
    "                dfs(i+1,tmp1+[arr[i]])\n",
    "            else:\n",
    "                f=find(arr[i])\n",
    "                for y in newp[f]:\n",
    "                    dfs(i+1,tmp1+[y])\n",
    "            return \n",
    "        dfs(0,[])\n",
    "        ans.sort()\n",
    "        return ans\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "        "
   ]
  }
 ],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 2
}
