{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 说明\n",
    "\n",
    "* firefox记录各个页址，通过导出，形成onetab文件，它的格式是文本文件。但它的链接表示方式与markdown的链接有区别，为此需要转换。\n",
    "* 一般会将它转换成为todo.md\n",
    "* 可以用里面的工具将todo.md里的内容抽取出来，形成主题markdown文件，例如microsoft.md就是用关键字\"microsoft\"抽取出来的。\n",
    "* todo.md或者主题markdown文件里，有许多内容也许已经与现有的markdown重复，所以需要进行去重的工作。\n",
    "* 分析链接是否可用，有些链接可能已经失效了。\n",
    "* 计划使用textdistance库来判断两个字符串的相似度\n",
    "* ipython==8.12.0\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pathlib import Path\n",
    "import os\n",
    "import pandas as pd\n",
    "import requests\n",
    "\n",
    "def live(url:str)->bool:\n",
    "    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '\n",
    "               'Chrome/51.0.2704.63 Safari/537.36'}\n",
    "    try:\n",
    "        r = requests.get(url, headers=headers, timeout=30)\n",
    "        r.raise_for_status()\n",
    "        if r.status_code == 200:\n",
    "            return True\n",
    "\n",
    "        if r.status_code == 404:\n",
    "            return False\n",
    "\n",
    "    except Exception as e:\n",
    "        print(f'{url} parse failed: {e}')\n",
    "        return False\n",
    "\n",
    "def cleanfile(target_pd:pd.DataFrame, filename:str):\n",
    "    \"\"\"\n",
    "    根据目录生成的dataframe，来判断某文件中是否有重复的行（主要是链接），如果有则删除之。\n",
    "\n",
    "    Args:\n",
    "        target_pd (pd.DataFrame): 目标数据集，使用setupinfos生成\n",
    "        filename (str): 检查的文件\n",
    "    \"\"\"\n",
    "    myfile = open(filename, 'r', encoding='utf-8')\n",
    "    currentlines = myfile.readlines()\n",
    "    myfile.close()\n",
    "    \n",
    "    removelines = []\n",
    "    for line in currentlines:\n",
    "        _, url = extract_url(line)\n",
    "        if url is not None:\n",
    "            # 在pandas中查找相同url的行\n",
    "            result = target_pd[target_pd['url']==url]\n",
    "            if not result.empty:\n",
    "                removelines.append(line)\n",
    "\n",
    "    # 删除重复的行\n",
    "    newlines = [line for line in currentlines if line not in removelines]\n",
    "    myfile = open(filename, 'w', encoding='utf-8')\n",
    "    myfile.writelines(newlines)\n",
    "    myfile.close()\n",
    "\n",
    "\n",
    "def extract_url(line:str):\n",
    "    \"\"\"\n",
    "    从一行中提取URL\n",
    "    \"\"\"\n",
    "    if '](' in line:\n",
    "        title, url = line.split('](')\n",
    "        realtitle = title[title.find('[') + 1:]\n",
    "        url = url[:url.find(')')]\n",
    "        return realtitle, url\n",
    "    return None, None\n",
    "\n",
    "\n",
    "def setupinfos(startdir: str) -> pd.DataFrame:\n",
    "    \"\"\"\n",
    "    遍历指定目录，形成dataframe数据集供后续使用。如果文件发生变化，必须重新生成以形成最新的数据\n",
    "    \"\"\"\n",
    "    data = []\n",
    "    for root, dirs, files in os.walk(startdir):\n",
    "        for file in files:\n",
    "            filepath = Path(root, file)\n",
    "            if filepath.suffix == '.md':\n",
    "                with open(filepath, 'r', encoding='utf-8') as f:\n",
    "                    lines = f.readlines()\n",
    "                    for index, line in enumerate(lines):\n",
    "                        if '](' in line:\n",
    "                            title, url = extract_url(line)\n",
    "                            data.append({\n",
    "                                'filename': filepath,\n",
    "                                'lineno': index + 1,\n",
    "                                'content': line,\n",
    "                                'title': title,\n",
    "                                \"url\": url\n",
    "                            })\n",
    "\n",
    "    return pd.DataFrame(data)\n",
    "\n",
    "\n",
    "def removefileline(original_file:str, line_number:int):\n",
    "    \"\"\"\n",
    "    将指定文件的指定行号的行删除，一次只能够删除一行\n",
    "\n",
    "    Args:\n",
    "        original_file (str): 要修改的文件\n",
    "        line_number (int): 行号\n",
    "    \"\"\"\n",
    "    current_index = 1\n",
    "    original_file = Path(original_file)\n",
    "    dummy_file = original_file.with_suffix('.bak')\n",
    "    with open(original_file, 'r') as read_obj, open(dummy_file,'w') as write_obj:\n",
    "        # Line by line copy data from original file to dummy file\n",
    "        for line in read_obj:\n",
    "            # If current line number matches the user's line number then skip copying\n",
    "            if current_index != line_number:\n",
    "                write_obj.write(line)\n",
    "            current_index += 1\n",
    "\n",
    "    # remove original file\n",
    "    os.remove(original_file)\n",
    "    # Rename dummy file as the original file\n",
    "    os.rename(dummy_file, original_file)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# step 4\n",
    "\n",
    "* 分析MARKDOWN中所有文档超链的有效性。随着时间的推移，有些链接失效了。需要及时的查找出来\n",
    "* 需要打开代理，有些网址并不好访问。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from time import sleep\n",
    "from IPython.display import clear_output\n",
    "from loguru import logger\n",
    "\n",
    "startdir = '/root/dev/markdown'\n",
    "loggerfile = 'fixinvalidurl.log'\n",
    "logger.add(loggerfile)\n",
    "\n",
    "def fixinvalidurl():\n",
    "    # 只遍历一次，因为文件的内容会变化。这是一个大坑。\n",
    "    result = setupinfos(startdir)\n",
    "    deactive = []\n",
    "    \n",
    "    #  遍历，然后判断链接是否生效\n",
    "    for index, row in result.iterrows():\n",
    "        if not live(row.url):\n",
    "            logger.info(f\"删除{row.filename,row.lineno,row.content}\")\n",
    "            deactive.append(index)\n",
    "    \n",
    "    # for index in deactive:\n",
    "        # print(f\"删除{result.loc[index].content}\")\n",
    "        # removefileline(result.loc[index].filename, result.loc[index].lineno)\n",
    "\n",
    "fixinvalidurl()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# step 3\n",
    "\n",
    "* 对指定目录下所有的markdown文件进行分析，提取超链后形成一个数据集，利用Pandas的能力分析哪些文件的行出现重复"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "无重复数据\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from time import sleep\n",
    "from IPython.display import clear_output\n",
    "\n",
    "startdir = '/root/dev/markdown'\n",
    "\n",
    "def action_once():\n",
    "    # 只遍历一次，因为文件的内容会变化。这是一个大坑。\n",
    "    result = setupinfos(startdir)\n",
    "    dup = result[result.duplicated(subset=['url'])]\n",
    "    if dup.empty:\n",
    "        print('无重复数据')\n",
    "        return None, None\n",
    "    else:\n",
    "        for index, row in dup.iterrows():\n",
    "\n",
    "            inputindex = []\n",
    "            indexno = 1\n",
    "\n",
    "            # 必须用url来精准对比，如果用title会重复许多。\n",
    "            dupdetail = result[result['url'] == row['url']]\n",
    "            for dupindex, duprow in dupdetail.iterrows():\n",
    "                inputindex.append(dupindex)\n",
    "                print(\n",
    "                    f\"{indexno} {duprow['filename']},{duprow['lineno']},{duprow['content']}\"\n",
    "                )\n",
    "                indexno += 1\n",
    "            break\n",
    "\n",
    "        sleep(1)\n",
    "        inputstr = input()\n",
    "        print(f'your choice is {inputstr}')\n",
    "\n",
    "        choice = int(inputstr)\n",
    "        if choice == 0:\n",
    "            return None, None\n",
    "\n",
    "        else:\n",
    "            index = inputindex[choice - 1]\n",
    "            selectpd = dupdetail.loc[index]\n",
    "            return selectpd.filename, selectpd.lineno\n",
    "\n",
    "\n",
    "# bexit = False\n",
    "bexit = True\n",
    "while (not bexit):\n",
    "    filename, lineno = action_once()\n",
    "    if filename is not None:\n",
    "        print(filename, lineno)\n",
    "        removefileline(filename, lineno)\n",
    "        clear_output()\n",
    "    else:\n",
    "        bexit = True"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# step 1\n",
    "\n",
    "* 将firefox onetab插件导出的文本文件转换成为markdown文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pathlib import Path\n",
    "from smartai.onetabparser import pyparsing_onetab\n",
    "\n",
    "def parseonetab(sourcefilename: str, targetfilename: str):\n",
    "    \"\"\"\n",
    "    分析onetab文件的内容，将符合关键字的行挑出来\n",
    "    \"\"\"\n",
    "    source_path = Path(sourcefilename)\n",
    "    if source_path.exists() is False:\n",
    "        raise FileNotFoundError(f\"文件{sourcefilename}不存在！\")\n",
    "\n",
    "    parser = pyparsing_onetab()\n",
    "    parser.parseOnetab(sourcefilename, targetfilename)\n",
    "\n",
    "sourcefile = '/root/dev/markdown/gtd/done/onetab.txt'\n",
    "targetmd = '/root/dev/markdown/gtd/done/onetab.md'\n",
    "# Uncomment to run the conversion for one export file:\n",
    "# parseonetab(sourcefile,targetmd)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# step 2\n",
    "\n",
    "* onetab转换成功的markdown文件里内容繁多，需要提取关键字。\n",
    "* 使用下面的函数可以方便的提取主题，形成相关的markdown文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pathlib import Path\n",
    "import os\n",
    "import pandas as pd\n",
    "\n",
    "def parsemarkdown(sourcefilename: str, keyword: str, jieba=True):\n",
    "    sourcepath = Path(sourcefilename)\n",
    "    path = sourcepath.parent\n",
    "    targetfilename = Path(path, 'exchange', f'{keyword}.md')\n",
    "    with open(sourcefilename, 'r', encoding='utf-8') as f:\n",
    "        lines = f.readlines()\n",
    "        savedlines = []\n",
    "        with open(targetfilename, 'w', encoding='utf-8') as myfile:\n",
    "            for line in lines:\n",
    "                title, _ = line.split('](')\n",
    "                realtitle = title[title.find('[') + 1:]\n",
    "                if jieba:\n",
    "                    titles = jieba.cut(realtitle, cut_all=True)\n",
    "                    if keyword.lower() in realtitle.lower():\n",
    "                        for item in titles:\n",
    "                            if item.lower() == keyword.lower():\n",
    "                                myfile.write(line)\n",
    "                                break\n",
    "                    else:\n",
    "                        savedlines.append(line)\n",
    "                else:\n",
    "                    if keyword.lower() in title.lower():\n",
    "                        myfile.write(line)\n",
    "                    else:\n",
    "                        savedlines.append(line)\n",
    "\n",
    "        with open(sourcefilename, 'w', encoding='utf-8') as myfile:\n",
    "            myfile.writelines(savedlines)\n",
    "\n",
    "\n",
    "# 这个可以随便更改,通过关键字来生成相关的MARKDOWN文档保存搜索的记录。\n",
    "# keywords = ['数字人']\n",
    "keywords = []\n",
    "for keyword in keywords:\n",
    "    parsemarkdown('/root/dev/markdown/gtd/todo.md', keyword, False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
