# coding=utf-8
# Edit start_web (starting URL) and keywords (search keyword) below to change the crawl target.
import requests
from bs4 import BeautifulSoup as bs
import sys

# Python 2 only: `reload` is a builtin and `sys.setdefaultencoding` is removed
# by site.py after startup, so it is only reachable via reload(sys).  Guarded
# so the script can still be imported under Python 3, where UTF-8 is already
# the default text encoding and this hack is unnecessary.
if sys.version_info[0] == 2:
    reload(sys)
    sys.setdefaultencoding('utf-8')

start_web = 'https://s.2.taobao.com/list/list.htm?spm=2007.1000261.641120289.3.2572d968NAopC6&st_edtime=1&q=%C4%DA%B4%E6&ist={page}'  # starting search-results URL; {page} is filled in each iteration
keywords = '黄马甲'  # keyword matched against each listing's combined title+description

matchQueue = {}  # listing URL -> combined text, for listings whose text contains `keywords`

description_title = []  # per-page list: each listing's title, later extended with its description
clock = 0  # flow-control counter — NOTE(review): never incremented anywhere; looks unused/buggy
page = 0  # current page index, substituted into start_web

page_info = {}  # per-page map: listing URL -> combined title+description

while page < 100:
    # Fetch the current listing page and parse it.
    r = requests.get(start_web.format(page=page))
    print('加载初始页面成功')
    soup = bs(r.text, 'lxml')  # parse the HTML with BeautifulSoup

    item_list = soup.find('ul', class_='item-lists')
    if item_list is None:
        # Layout changed or the page has no listings; stop instead of
        # crashing with an AttributeError on .find_all below.
        break

    # Per-listing descriptions and titles both live inside the item list.
    description = item_list.find_all('div', class_='item-description')
    get_title = item_list.find_all('h4')
    title_h4 = bs(str(get_title), 'lxml')  # re-parse to pull the <a> out of each <h4>
    title = title_h4.find_all('a')

    # Collect every listing's title text.
    for temp in title:
        description_title.append(temp.get_text())

    # Append each listing's description to its OWN title.  (Bug fix: the
    # original indexed with `clock`, which is never incremented, so every
    # description clobbered element 0 instead of pairing with its title.)
    for i, temp in enumerate(description):
        description_title[i] = description_title[i] + temp.get_text()

    # Map each listing's absolute URL to its combined title+description.
    for i, temp in enumerate(title):
        temp2 = temp.get('href')
        page_info['https:' + temp2] = description_title[i]

    print('正在匹配关键字')
    for temp in page_info:
        if keywords in page_info[temp]:
            print('成功匹配到关键字: %s' % keywords)
            matchQueue[temp] = page_info[temp]

    description_title = []  # reset the per-page title/description list

    # Follow the "next page" link; stop cleanly when there is none
    # (last results page has no paginator-next anchor).
    next_page = soup.find('a', class_="paginator-next")
    if next_page is None:
        break
    start_web = 'https:' + next_page.get('href')

    print(page)
    print(matchQueue)
    page_info = {}  # reset per-page state
    page += 1  # advance to the next page
