{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1.打开网页\n",
    "# 2.根据不同检索条件打开网页\n",
    "# 3.获取条目信息\n",
    "# 4.获取分页条目信息\n",
    "# 5.获取条目详细信息\n",
    "# 6.保存到数据库\n",
    "\n",
    "# url=\"http://www.rong360.com/shanghai/search.html?loan_limit=5&loan_term=12&application_type=9\"\n",
    "import urllib.request\n",
    "import re\n",
    "from bs4 import BeautifulSoup\n",
    "import pymysql\n",
    "import csv\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 113,
   "metadata": {},
   "outputs": [],
   "source": [
    "# url,user,pwd,name='106.13.49.215','root','root','jup'\n",
    "# db=pymysql.connect(url,user,pwd,name)\n",
    "# cursor=db.cursor()\n",
    "# create=\"create database rong360\"\n",
    "# drop=\"DROP TABLE IF EXISTS sku\"\n",
    "# cursor.execute(drop)\n",
    "# db.commit()\n",
    "# id name click_url specs reqs lixi succesrate view lv\n",
    "# create= \"\"\"CREATE TABLE item (\n",
    "#          id bigint(20) not null AUTO_INCREMENT,\n",
    "#            pid varchar(32) NOT NULL,\n",
    "#          name varchar(32) NOT NULL,\n",
    "#          click_url varchar(32) NOT NULL,\n",
    "#          specs varchar(32) NOT NULL,\n",
    "#          reqs varchar(32) NOT NULL,\n",
    "#          lixi varchar(32) NOT NULL,\n",
    "#          succesrate varchar(32) NOT NULL,\n",
    "#          view varchar(32) NOT NULL,\n",
    "#          lv varchar(32) NOT NULL,\n",
    "#          PRIMARY KEY (id) USING BTREE\n",
    "#          )\"\"\"\n",
    "\n",
    "# cursor.execute(create)\n",
    "# db.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "metadata": {},
   "outputs": [],
   "source": [
    "def getHtml(url, encoding, num_retries=2):\n",
    "    \"\"\"Download `url` and decode the response body with `encoding`.\n",
    "\n",
    "    Retries up to `num_retries` times on server-side (HTTP 5xx)\n",
    "    errors. Returns the decoded HTML string, or None on failure.\n",
    "    \"\"\"\n",
    "    print(\"Downloading: \" + url)\n",
    "    user_agent = 'wswp'\n",
    "    headers = {'User-agent': user_agent}\n",
    "    req = urllib.request.Request(url=url, headers=headers)\n",
    "    try:\n",
    "        html = urllib.request.urlopen(req).read()\n",
    "        html = html.decode(encoding)\n",
    "    except urllib.request.URLError as e:\n",
    "        # e.reason may be an exception object rather than a str, so\n",
    "        # stringify it before concatenating\n",
    "        print(\"Download error: \" + str(e.reason))\n",
    "        html = None\n",
    "        if num_retries > 0:\n",
    "            # bug fix: the original recursed with (url, user_agent, n-1),\n",
    "            # which passed the user agent as the encoding and a third\n",
    "            # positional arg the old signature did not accept\n",
    "            if hasattr(e, 'code') and 500 <= e.code < 600:\n",
    "                return getHtml(url, encoding, num_retries - 1)\n",
    "    return html"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 115,
   "metadata": {},
   "outputs": [],
   "source": [
    "# collect every item's id, name and click url on the page\n",
    "def getItemIdNameClickUrls(html_doc):\n",
    "    \"\"\"Return (item_id, name, absolute_click_url) tuples for each\n",
    "    product anchor on a search-result page.\n",
    "\n",
    "    Anchors without a click-url attribute and the '查看' view links\n",
    "    are skipped.\n",
    "    \"\"\"\n",
    "    soup = BeautifulSoup(html_doc, 'html.parser')\n",
    "    itemNames = []\n",
    "    for anchor in soup.find_all('a'):\n",
    "        click_url = anchor.get('click-url')\n",
    "        if click_url is None or anchor.text == '查看':\n",
    "            continue\n",
    "        # click-url looks like //www.rong360.com/p_<id>?st=...; the\n",
    "        # third '/'-separated segment of the query-less URL is the id\n",
    "        item_id = click_url.split('?')[0].split('/')[3]\n",
    "        itemNames.append((item_id, anchor.text, 'http:' + click_url))\n",
    "    return itemNames"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 116,
   "metadata": {},
   "outputs": [],
   "source": [
    "def search(limits, terms):\n",
    "    \"\"\"Scrape item rows for every (loan_limit, loan_term) combination.\n",
    "\n",
    "    Fetches the first result page for each combination, then follows\n",
    "    each pagination link and returns the accumulated item rows.\n",
    "    \"\"\"\n",
    "    itemList = []\n",
    "    for limit in limits:\n",
    "        for term in terms:\n",
    "            html_url = \"http://www.rong360.com/shanghai/search.html?loan_limit=\" + limit + \"&loan_term=\" + term + \"&application_type=9\"\n",
    "            print('get all item id name click_url')\n",
    "            html_doc = getHtml(html_url, 'UTF-8')\n",
    "            items = getItemInfo(html_doc)\n",
    "            print(\"getItemInfo======\")\n",
    "            for item in items:\n",
    "                itemList.append(item)\n",
    "            for page in getPage(html_doc):\n",
    "                # bug fix: download the pagination URL itself; the\n",
    "                # original re-fetched html_url here, so pages after the\n",
    "                # first were never actually scraped\n",
    "                html_doc = getHtml(page, 'UTF-8')\n",
    "                items = getItemInfo(html_doc)\n",
    "                print(\"page getItemInfo======\")\n",
    "                for item in items:\n",
    "                    itemList.append(item)\n",
    "    return itemList\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {},
   "outputs": [],
   "source": [
    "# collect lender logo images\n",
    "def getLogo(html_doc):\n",
    "    \"\"\"Return [src, title] pairs for every <img> tag that carries an\n",
    "    alt attribute.\"\"\"\n",
    "    soup = BeautifulSoup(html_doc, 'html.parser')\n",
    "    logos = []\n",
    "    for img in soup.find_all('img'):\n",
    "        if img.get('alt') is not None:\n",
    "            logos.append([img.get('src'), img.get('title')])\n",
    "    return logos"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "metadata": {},
   "outputs": [],
   "source": [
    "# parse the item_info rows on a search-result page\n",
    "def getItemInfo(html_doc):\n",
    "    \"\"\"Parse each div.item_info row into a list of fields:\n",
    "    [id, name, click_url, specs, reqs, lixi, succesrate, view, lv].\"\"\"\n",
    "    soup = BeautifulSoup(html_doc, 'html.parser')\n",
    "    itemInfos = []\n",
    "    for item in soup.find_all('div', class_='item_info'):\n",
    "        print(\"id name click_url specs reqs lixi succesrate view lv\")\n",
    "        anchor = item.find('a')\n",
    "        click_url = anchor.get('click-url')\n",
    "        row = [click_url.split('?')[0].split('/')[3], anchor.text, 'http:' + click_url]\n",
    "        row.append(item.find('ul', class_='meta_sep specs').text.replace('\\n', ' '))\n",
    "        row.append(item.find('ul', class_='meta_sep reqs').text.replace('\\n', ' '))\n",
    "        row.append(item.find('ul', class_='meta_sep lixi').text.replace('\\n', ' ').replace('\\u3000', '').replace('\\xa0', ''))\n",
    "        row.append(item.find('ul', class_='meta_sep succesrate').text.replace('\\n', ' ').replace('额度', '').replace(' ', ''))\n",
    "        row.append(item.find('ul', class_='meta_sep view').text.replace('\\n', ' ').replace('查看', '').replace('成功率', '').replace('\\xa0', '').replace(' ', ''))\n",
    "        # the interest-rate tooltip lives in a hover-tip attribute that\n",
    "        # contains inline HTML markup; strip it down to plain text\n",
    "        lv = item.find('ul', class_='meta_sep lixi').find('li', class_='spec lilv more').find('span')\n",
    "        row.append(lv.get('hover-tip').replace('<span class=\"span-gray\">', '').replace('<br>', ' ').replace('<br/>', ' ').replace('</span>', ''))\n",
    "        itemInfos.append(row)\n",
    "    return itemInfos\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "metadata": {},
   "outputs": [],
   "source": [
    "# pagination links\n",
    "def getPage(html_doc):\n",
    "    \"\"\"Return the absolute URLs of the numbered pagination links,\n",
    "    excluding the '下一页' (next page) link.\"\"\"\n",
    "    soup = BeautifulSoup(html_doc, 'html.parser')\n",
    "    pageNum = []\n",
    "    pager = soup.find('div', class_='page')\n",
    "    if pager is not None:\n",
    "        for link in pager.find_all('a'):\n",
    "            full_url = 'http://www.rong360.com' + link.get(\"href\")\n",
    "            print(link.text)\n",
    "            print(full_url)\n",
    "            if link.text != '下一页':\n",
    "                pageNum.append(full_url)\n",
    "    return pageNum\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 120,
   "metadata": {},
   "outputs": [],
   "source": [
    "# open the product link and scrape its detail page\n",
    "def getItemDetail(html_doc):\n",
    "    \"\"\"Parse a product detail page into a flat list of text fields.\n",
    "\n",
    "    Field order: name, import-info, rate_desc, item_desc,\n",
    "    product_commit (only when present), one entry per notice block\n",
    "    (condition / material / pd_lilv), success cases.\n",
    "    \"\"\"\n",
    "    detail=[]\n",
    "    # NOTE(review): dt is never used below — candidate for removal\n",
    "    dt=[]\n",
    "    soup = BeautifulSoup(html_doc, 'html.parser')\n",
    "    link=soup.find('div',class_='new-product-info')\n",
    "    # product name and the headline import-info line\n",
    "    detail.append(link.find('h1').text.replace('  ',''))\n",
    "    detail.append((link.find('div',class_='import-info').text.replace('\\n','').replace('   ','')))\n",
    "    # rate description: concatenate each span's text plus its hover-tip,\n",
    "    # stripping the inline markup and the early-repayment boilerplate\n",
    "    tip=''\n",
    "    for sp in link.find('p',class_='item').find_all('span'):\n",
    "        tip=tip+(sp.text)\n",
    "        if sp.get('hover-tip') != None:\n",
    "            tip=tip+(sp.get('hover-tip').replace('\\r\\n',''))\n",
    "            tip=tip.replace('\\n提前还款说明\\n ','').replace('<span class=\"span-gray\">','：').replace('；</span>','')\n",
    "        tip+=' '\n",
    "    detail.append(tip)\n",
    "    # field-wrap blocks (loan amount / term); splice the <input> default\n",
    "    # value in front of the 万元 / 个月 unit text when one exists\n",
    "    wrap=''\n",
    "    for ls in link.find_all('div',class_='field-wrap'):\n",
    "        rp=ls.text.replace('\\n','').replace('  ','').replace(' ','').replace('贷款','  贷款')\n",
    "        if ls.find('input') != None:\n",
    "            rp=rp.replace('万元',ls.find('input').get('value')+'万元').replace('个月',ls.find('input').get('value')+'个月')\n",
    "        wrap=wrap+rp\n",
    "        wrap+=' '\n",
    "    detail.append(wrap)\n",
    "    # optional commitment blurb — appended only when the div exists\n",
    "    if link.find('div',class_='product-commit') != None:\n",
    "        detail.append((link.find('div',class_='product-commit').text.replace('\\n','')))\n",
    "    # notice blocks: one detail entry per pd_other_item_content div\n",
    "    for link in soup.find_all('div',class_='pd_block_notice'):\n",
    "        for ct in link.find_all('div',class_='pd_other_item_content'):\n",
    "            detail.append(ct.text.replace(' ','').replace('\\n','').replace('\\r',''))\n",
    "\n",
    "    # success-case stories, concatenated into a single string\n",
    "    caseList=''\n",
    "    for case in soup.find_all('div','case-item'):\n",
    "        caseList=caseList+(case.find('div',class_='tag').text+'万 '+case.find('div',class_='case-content').text.replace('\\n','').replace('\\xa0',''))\n",
    "\n",
    "    detail.append(caseList)\n",
    "    # order: name import-info rate_desc item_desc product_commit condition material pd_lilv sucess-case\n",
    "    return detail"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 121,
   "metadata": {},
   "outputs": [],
   "source": [
    "# limits=['0.3','1.0','3.0','10.0','20.0','50.0','100.0','5.0'];\n",
    "# terms=['3','6','12','24','36','60','120']\n",
    "# itemList=search(limits,terms)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 122,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # limits=['0.3','5.0'];\n",
    "# # terms=['3','12']\n",
    "# limits=['0.3','1.0','3.0','10.0','20.0','50.0','100.0','5.0'];\n",
    "# terms=['3','6','12','24','36','60','120']\n",
    "# itemList=search(limits,terms)\n",
    "# print(\"=========================================\")\n",
    "# # print(itemList)\n",
    "\n",
    "# test_csv=\"rong360.csv\"\n",
    "# # test_data=result[itemList:]\n",
    "# headers=['id','name','click_url','specs','reqs','lixi','succesrate','view','lv']\n",
    "\n",
    "# with open(test_csv,'w') as f:\n",
    "#     f_csv=csv.writer(f)\n",
    "#     f_csv.writerow(headers)\n",
    "#     f_csv.writerows(itemList)\n",
    "# print('write test csv success')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 123,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# read the scraped item list and fetch each product's detail page\n",
    "data_file = \"rong360.csv\"\n",
    "rows = []  # renamed from `list`, which shadowed the builtin\n",
    "with open(data_file) as f:\n",
    "    data = csv.reader(f)\n",
    "    header = next(data)\n",
    "    print(header)\n",
    "    for row in data:\n",
    "        rows.append(row)\n",
    "print(len(rows))\n",
    "\n",
    "detail = []  # bug fix: was misspelled `deail`, so detail.append raised NameError\n",
    "for da in rows:\n",
    "    print(da[2])\n",
    "    doc = getHtml(da[2], 'UTF-8')\n",
    "    item = getItemDetail(doc)\n",
    "    # prepend the item id parsed from the click url (column 2)\n",
    "    item.insert(0, da[2].split('?')[0].split('/')[3])\n",
    "    detail.append(item)\n",
    "print(len(detail))\n",
    "\n",
    "headers = ['id', 'name', 'import_info', 'rate_desc', 'item_desc', 'product_commit', 'condition', 'material', 'pd_lilv', 'sucess_case']\n",
    "\n",
    "test_csv = \"rong360_detail.csv\"\n",
    "# newline='' keeps the csv module from inserting blank rows on Windows\n",
    "with open(test_csv, 'w', newline='') as f:\n",
    "    f_csv = csv.writer(f)\n",
    "    f_csv.writerow(headers)\n",
    "    # bug fix: write the populated `detail` list (was the empty `deail`)\n",
    "    f_csv.writerows(detail)\n",
    "print('write test csv success')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
