{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# python网络爬虫应用实战（第20期）第8课书面作业\n",
    "学号：115799\n",
    "\n",
    "**作业内容：**  \n",
    "尝试完成以下爬虫\n",
    "\n",
    "1. 爬取 iphone 搜索结果中，销量最高的10个商品的商品名称、店铺名称、商品评论  \n",
    "2. 爬取京东 python 相关书籍的全部商品信息（商品名称、链接、店铺等）"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 第1题"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from selenium import webdriver\n",
    "from selenium.common.exceptions import TimeoutException\n",
    "from selenium.webdriver.common.by import By\n",
    "from selenium.webdriver.support import expected_conditions as EC\n",
    "from selenium.webdriver.support.wait import WebDriverWait\n",
    "from pyquery import PyQuery as pq\n",
    "from urllib.parse import quote\n",
    "from PIL import Image\n",
    "import urllib\n",
    "import os\n",
    "import time\n",
    "# Mouse actions (used to drag the login slider). The second, duplicate\n",
    "# 'from selenium.webdriver import ActionChains' import has been removed.\n",
    "from selenium.webdriver.common.action_chains import ActionChains\n",
    "# Keyboard key constants\n",
    "from selenium.webdriver.common.keys import Keys\n",
    "import json\n",
    "\n",
    "## Log in to Taobao.\n",
    "# SECURITY NOTE(review): never hard-code real credentials in a notebook —\n",
    "# they leak through version control and shared copies. Prefer environment\n",
    "# variables; the literals are kept only as a fallback for local runs.\n",
    "username = os.environ.get('TB_USERNAME', '16621676826')\n",
    "password = os.environ.get('TB_PASSWORD', 'CA99015F@sH')\n",
    "\n",
    "options = webdriver.ChromeOptions()\n",
    "user_ag = \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\"\n",
    "options.add_argument('user-agent=%s' % user_ag)\n",
    "# NOTE(review): executable_path is deprecated in Selenium 4 (removed in 4.10);\n",
    "# prefer webdriver.Chrome(service=Service('chromedriver'), options=options).\n",
    "browser = webdriver.Chrome(executable_path=\"chromedriver\", options=options)\n",
    "# Open the Taobao login page\n",
    "browser.get(\"https://login.taobao.com/member/login.jhtml?spm=a21bo.21814703.754894437.1.5af911d9tBuTtn&f=top&redirectURL=https%3A%2F%2Fwww.taobao.com%2F\")\n",
    "# JS that hides navigator.webdriver from the page's bot detection.\n",
    "# NOTE(review): running it after get() only patches the already-loaded page;\n",
    "# the CDP command Page.addScriptToEvaluateOnNewDocument applies it earlier.\n",
    "script = \"Object.defineProperty(navigator,'webdriver',{get: ()=> false,});\"\n",
    "browser.execute_script(script)\n",
    "time.sleep(2)\n",
    "browser.find_element('id','fm-login-id').send_keys(username)\n",
    "time.sleep(2)\n",
    "browser.find_element('id','fm-login-password').send_keys(password)\n",
    "time.sleep(2)\n",
    "try:\n",
    "    # The slider captcha only appears when risk control triggers.\n",
    "    slider = browser.find_element('xpath',\"//span[contains(@class, 'btn_slide')]\")\n",
    "    if slider.is_displayed():\n",
    "        # Press and hold the slider...\n",
    "        ActionChains(browser).click_and_hold(on_element=slider).perform()\n",
    "        # ...drag it 258px to the right...\n",
    "        ActionChains(browser).move_by_offset(xoffset=258, yoffset=0).perform()\n",
    "        # ...pause briefly, then release.\n",
    "        ActionChains(browser).pause(0.5).release().perform()\n",
    "except Exception:\n",
    "    # Best effort: no slider shown (or the drag failed) — just continue.\n",
    "    pass\n",
    "time.sleep(2)\n",
    "browser.find_element('xpath','//*[@id=\"login-form\"]/div[4]/button').click()\n",
    "time.sleep(10)\n",
    "\n",
    "\n",
    "## Globals shared by the crawl helpers below\n",
    "KEYWORD = 'iphone'\n",
    "MAX_PAGE = 2\n",
    "wait = WebDriverWait(browser, 10)\n",
    "\n",
    "def get_score():\n",
    "    \"\"\"Return the review/score text of the item page in the current tab.\n",
    "\n",
    "    Scrolls to the bottom first (the review widgets lazy-load), then tries a\n",
    "    list of known XPath locations — regular item pages, shop pages and Tmall\n",
    "    pages keep the figure in different containers. Returns '' if none match.\n",
    "    \"\"\"\n",
    "    browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')\n",
    "    # BUG FIX: the locator strategy must be By.XPATH ('xpath'); the previous\n",
    "    # upper-case 'XPATH' string is rejected by the driver, so every lookup\n",
    "    # raised into the bare except and this function always returned ''.\n",
    "    candidates = (\n",
    "        '//*[@id=\"J_Reviews\"]/div/div[1]/div[1]/strong',\n",
    "        '//*[@id=\"shop-info\"]/div[2]/div[1]/div[2]/span',\n",
    "        '//*[@id=\"J_Shop-info\"]/div[2]/div[2]/div/div[1]/dd/a',\n",
    "    )\n",
    "    for xpath in candidates:\n",
    "        try:\n",
    "            txt = browser.find_element(By.XPATH, xpath).text\n",
    "        except Exception:\n",
    "            continue\n",
    "        if txt:\n",
    "            return txt\n",
    "    return ''\n",
    "\n",
    "def index_page(page):\n",
    "    \"\"\"\n",
    "    Crawl one search-result index page.\n",
    "\n",
    "    :param page: 1-based page number to load\n",
    "    :return: the product list from get_products(), or [] on timeout\n",
    "    \"\"\"\n",
    "    print('正在爬取第', page, '页')\n",
    "    try:\n",
    "        url = 'https://s.taobao.com/search?q=' + quote(KEYWORD)\n",
    "        browser.get(url)\n",
    "        if page > 1:\n",
    "            # The search always lands on page 1; jump via the pager's input\n",
    "            # box. Renamed from 'input' to avoid shadowing the built-in.\n",
    "            page_input = wait.until(\n",
    "                EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager div.form > input')))\n",
    "            submit = wait.until(\n",
    "                EC.element_to_be_clickable((By.CSS_SELECTOR, '#mainsrp-pager div.form > span.btn.J_Submit')))\n",
    "            page_input.clear()\n",
    "            page_input.send_keys(page)\n",
    "            submit.click()\n",
    "        # Wait until the pager highlights the requested page and items render.\n",
    "        wait.until(\n",
    "            EC.text_to_be_present_in_element((By.CSS_SELECTOR, '#mainsrp-pager li.item.active > span'), str(page)))\n",
    "        wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.m-itemlist .items .item')))\n",
    "        return get_products()\n",
    "    except TimeoutException:\n",
    "        print('timeout!')\n",
    "        # BUG FIX: return [] instead of the implicit None so that callers can\n",
    "        # sort/slice the result without a TypeError.\n",
    "        return []\n",
    "\n",
    "\n",
    "def _parse_deal(deal_text):\n",
    "    \"\"\"Convert a Taobao deal-count string such as '1.5万+人付款' to an int.\n",
    "\n",
    "    BUG FIX: the old replace('万', '0000') substitution broke on decimal\n",
    "    counts ('1.5万' -> '1.50000', which int() rejects). Strip the suffixes\n",
    "    and multiply instead. Empty/missing text counts as 0.\n",
    "    \"\"\"\n",
    "    txt = deal_text.replace('人付款', '').replace('+', '').strip()\n",
    "    if not txt:\n",
    "        return 0\n",
    "    if txt.endswith('万'):\n",
    "        return int(float(txt[:-1]) * 10000)\n",
    "    return int(float(txt))\n",
    "\n",
    "\n",
    "def get_products():\n",
    "    \"\"\"Extract product data from the current search-result page.\n",
    "\n",
    "    Opens each item in a new tab to fetch its score via get_score(), then\n",
    "    parses the listing with pyquery. Returns a list of product dicts.\n",
    "    \"\"\"\n",
    "    links = browser.find_elements('xpath', \"//a[@class='pic-link J_ClickStat J_ItemPicA']\")\n",
    "    scores = []\n",
    "    for link in links:\n",
    "        link.click()\n",
    "        # The click opens a new tab; switch to the newest handle, scrape the\n",
    "        # score, close the tab and return to the result list.\n",
    "        ws = browser.window_handles\n",
    "        browser.switch_to.window(ws[-1])\n",
    "        scores.append(get_score())\n",
    "        browser.close()\n",
    "        browser.switch_to.window(ws[0])\n",
    "\n",
    "    html = browser.page_source\n",
    "    doc = pq(html)\n",
    "    items = doc('#mainsrp-itemlist .items .item').items()\n",
    "    products = []\n",
    "    for i, item in enumerate(items):\n",
    "        product = {\n",
    "            # 'image': item.find('.pic .img').attr('data-src'),\n",
    "            'price': item.find('.price').text(),\n",
    "            'deal': _parse_deal(item.find('.deal-cnt').text()),\n",
    "            'title': item.find('.title').text(),\n",
    "            'shop': item.find('.shop').text(),\n",
    "            'location': item.find('.location').text(),\n",
    "            # BUG FIX: the scraped review scores were collected (old 'pj'\n",
    "            # list) but never attached to a product; expose them here.\n",
    "            'comment': scores[i] if i < len(scores) else ''\n",
    "        }\n",
    "        print(product)\n",
    "        products.append(product)\n",
    "    return products\n",
    "\n",
    "products = index_page(1)  # crawl just the first page for the homework\n",
    "# Guard: index_page() can return None/[] on timeout — don't crash on sort.\n",
    "products = products or []\n",
    "products.sort(key=lambda x: x['deal'], reverse=True)\n",
    "print(products[:10])  # print the 10 best-selling products"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "运行截图如下："
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "[![jIJQ2V.jpg](https://s1.ax1x.com/2022/07/17/jIJQ2V.jpg)](https://imgtu.com/i/jIJQ2V)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 第2题"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始爬取\n",
      "正在爬取第 1 页......\n",
      "正在爬取第 2 页......\n",
      "正在爬取第 3 页......\n",
      "正在爬取第 4 页......\n",
      "正在爬取第 5 页......\n",
      "正在爬取第 6 页......\n",
      "正在爬取第 7 页......\n",
      "正在爬取第 8 页......\n",
      "正在爬取第 9 页......\n",
      "结束爬取\n"
     ]
    }
   ],
   "source": [
    "from selenium import webdriver\n",
    "from selenium.webdriver.support.wait import WebDriverWait\n",
    "from selenium.webdriver.support import expected_conditions as EC\n",
    "from selenium.webdriver.common.by import By\n",
    "import selenium.common.exceptions\n",
    "from requests.utils import quote\n",
    "import time\n",
    "\n",
    "class JdSpider():\n",
    "    \"\"\"Crawl JD search results (link / price / name / comment count) into Jd.txt.\"\"\"\n",
    "\n",
    "    def open_file(self):\n",
    "        # Output file; truncated on every run.\n",
    "        self.fd = open('Jd.txt','w',encoding='utf-8')\n",
    "\n",
    "    def open_browser(self):\n",
    "        self.browser = webdriver.Chrome()\n",
    "        self.browser.implicitly_wait(10)\n",
    "        self.wait = WebDriverWait(self.browser,10)\n",
    "\n",
    "    def init_variable(self):\n",
    "        self.page = 1        # current result page (1-based)\n",
    "        self.maxpage = 10    # crawl at most this many pages\n",
    "        self.data = zip()    # empty iterable until the first parse_page()\n",
    "        self.isLast = False  # set True once the last page has been crawled\n",
    "\n",
    "    def parse_page(self, _retries=3):\n",
    "        \"\"\"Scrape link/price/name/comment-count for every item on the page.\n",
    "\n",
    "        :param _retries: remaining retries after a timeout (new parameter\n",
    "                         with a default, so existing callers are unaffected).\n",
    "        \"\"\"\n",
    "        try:\n",
    "            skus = self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//li[@class=\"gl-item\"]')))\n",
    "            skus = [item.get_attribute('data-sku') for item in skus]\n",
    "            links = ['https://item.jd.com/{sku}.html'.format(sku=item) for item in skus]\n",
    "            prices = self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//div[@class=\"gl-i-wrap\"]/div[2]/strong/i')))\n",
    "            prices = [item.text for item in prices]\n",
    "            names = self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//div[@class=\"gl-i-wrap\"]/div[3]/a/em')))\n",
    "            names = [item.text for item in names]\n",
    "            comments = self.wait.until(EC.presence_of_all_elements_located((By.XPATH,'//div[@class=\"gl-i-wrap\"]/div[4]/strong')))\n",
    "            comments = [item.text for item in comments]\n",
    "            self.data = zip(links,prices,names,comments)\n",
    "        except selenium.common.exceptions.TimeoutException:\n",
    "            print('parse_page: TimeoutException')\n",
    "            # BUG FIX: the old code retried unconditionally, which recursed\n",
    "            # forever on a persistent timeout. Cap the retries instead.\n",
    "            if _retries > 0:\n",
    "                self.parse_page(_retries - 1)\n",
    "        except selenium.common.exceptions.StaleElementReferenceException:\n",
    "            print('parse_page: StaleElementReferenceException')\n",
    "            self.browser.refresh()\n",
    "\n",
    "    def turn_page(self, _retries=3):\n",
    "        \"\"\"Click through to the next result page; set isLast when done.\"\"\"\n",
    "        try:\n",
    "            self.wait.until(EC.element_to_be_clickable((By.XPATH,'//a[@class=\"pn-next\"]'))).click()\n",
    "            time.sleep(1)\n",
    "            # JD lazy-loads the bottom half of the grid; scroll to force it.\n",
    "            self.browser.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\n",
    "            time.sleep(2)\n",
    "            self.page += 1\n",
    "            # BUG FIX: the old '>=' stopped after maxpage-1 pages (9 of 10,\n",
    "            # as the recorded output shows); '>' crawls the final page too.\n",
    "            if self.page > self.maxpage:\n",
    "                self.isLast = True\n",
    "        except selenium.common.exceptions.NoSuchElementException:\n",
    "            # NOTE(review): wait.until() raises TimeoutException rather than\n",
    "            # NoSuchElementException, so this branch is effectively dead.\n",
    "            self.isLast = True\n",
    "        except selenium.common.exceptions.TimeoutException:\n",
    "            print('turn_page: TimeoutException')\n",
    "            if _retries > 0:\n",
    "                self.turn_page(_retries - 1)\n",
    "            else:\n",
    "                # Give up instead of recursing forever (e.g. no next button).\n",
    "                self.isLast = True\n",
    "        except selenium.common.exceptions.StaleElementReferenceException:\n",
    "            print('turn_page: StaleElementReferenceException')\n",
    "            self.browser.refresh()\n",
    "\n",
    "    def write_to_file(self):\n",
    "        # self.data is a zip iterator — it is consumed here, once per page.\n",
    "        for item in self.data:\n",
    "            self.fd.write('----------------------------------------\\n')\n",
    "            self.fd.write('link：' + str(item[0]) + '\\n')\n",
    "            self.fd.write('price：' + str(item[1]) + '\\n')\n",
    "            self.fd.write('name：' + str(item[2]) + '\\n')\n",
    "            self.fd.write('comment：' + str(item[3]) + '\\n')\n",
    "\n",
    "    def close_file(self):\n",
    "        self.fd.close()\n",
    "\n",
    "    def close_browser(self):\n",
    "        self.browser.quit()\n",
    "\n",
    "    def crawl(self, keyword):\n",
    "        \"\"\"Crawl all result pages for `keyword` and write them to Jd.txt.\"\"\"\n",
    "        self.open_file()\n",
    "        self.open_browser()\n",
    "        self.init_variable()\n",
    "        print('开始爬取')\n",
    "        self.browser.get('https://search.jd.com/Search?keyword='+quote(keyword)+'&enc=utf-8')\n",
    "        time.sleep(1)\n",
    "        self.browser.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\n",
    "        time.sleep(2)\n",
    "        count = 0\n",
    "        while not self.isLast:\n",
    "            count += 1\n",
    "            print('正在爬取第 ' + str(count) + ' 页......')\n",
    "            self.parse_page()\n",
    "            self.write_to_file()\n",
    "            self.turn_page()\n",
    "        self.close_file()\n",
    "        self.close_browser()\n",
    "        print('结束爬取')\n",
    "\n",
    "# Run the crawler for the keyword 'python'.\n",
    "spider = JdSpider()\n",
    "spider.crawl('python')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "截图如下："
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "[![jI1yh6.png](https://s1.ax1x.com/2022/07/17/jI1yh6.png)](https://imgtu.com/i/jI1yh6)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  },
  "vscode": {
   "interpreter": {
    "hash": "9f35b62a1d17a2b36b9c54ddf6a1c189fdf51f8ec8cea898ba9fed29bc45b6fd"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
