# -*- coding: utf-8 -*-
# -文件说明 淘宝搜索-
# @Time    : 2019/8/8 16:44
# @Author  : hwx
# @Email   : @163.com
# @File    : 爬虫demo5_淘宝搜索.py
# @Software: PyCharm
# https://s.taobao.com/search?initiative_id=tbindexz_20170306&ie=utf8&spm=a21bo.2017.201856-taobao-item.2&
# sourceId=tb.index&search_type=item&ssid=s5-e&commend=all&imgfile=&q=%E7%BA%A2%E6%A5%BC%E6%A2%A6&suggest=0_1&_input_charset=utf-8&wq=hongloum&suggest_query=hongloum&source=suggest

# https://s.taobao.com/search?initiative_id=tbindexz_20170306&ie=utf8&spm=a21bo.2017.201856-taobao-item.2&sourceId=tb.index&search_type=item&ssid=s5-e&commend=all&imgfile=&
# q=%E7%BA%A2%E6%A5%BC%E6%A2%A6&suggest=0_1&_input_charset=utf-8&wq=hongloum&suggest_query=hongloum&source=suggest
# https://s.taobao.com/search?initiative_id=tbindexz_20170306&ie=utf8&spm=a21bo.2017.201856-taobao-item.2&sourceId=tb.index&search_type=item&ssid=s5-e&commend=all&imgfile=&
# q=%E7%BA%A2%E6%A5%BC%E6%A2%A6&suggest=0_1&_input_charset=utf-8&wq=hongloum&suggest_query=hongloum&source=suggest&bcoffset=3&ntoffset=3&p4ppushleft=1%2C48&s=44
# https://s.taobao.com/search?initiative_id=tbindexz_20170306&ie=utf8&spm=a21bo.2017.201856-taobao-item.2&sourceId=tb.index&search_type=item&ssid=s5-e&commend=all&imgfile=&
# q=%E7%BA%A2%E6%A5%BC%E6%A2%A6&suggest=0_1&_input_charset=utf-8&wq=hongloum&suggest_query=hongloum&source=suggest&bcoffset=0&ntoffset=6&p4ppushleft=1%2C48&s=88
import re
from urllib import parse

import requests
from bs4 import BeautifulSoup


def urlcode(p):
    """Percent-encode a search keyword for use in a URL query string.

    :param p: keyword text, e.g. a Chinese search term
    :return: the URL-encoded (percent-escaped, UTF-8) form of ``p``
    """
    encoded = parse.quote(p)
    print('关键词：<', p, '>经过urlcode编码后转换为<', encoded, '>')
    return encoded
    # Decoding back would be: parse.unquote(encoded)


def getHTMLtext(url):
    """Fetch a Taobao page and return its decoded HTML text.

    Taobao's anti-crawling checks require a browser-like user-agent and a
    logged-in cookie, so both are sent with every request.

    :param url: full search-result URL to fetch
    :return: the response body as text, or ``None`` when the request fails
        (callers are expected to tolerate ``None``)
    """
    # Header dict is constant — build it outside the try so only the
    # network call is guarded.
    headersmsg = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'cookie': 'thw=cn; cna=T5QxFT75MFwCAd9SzD64M3Tw; tracknick=%5Cu517C%5Cu804C%5Cu7684%5Cu5C0F%5Cu9EC4; _cc_=UIHiLt3xSw%3D%3D; tg=0; enc=dtrgSGswkvO%2FsWQ%2FZj4vxeXZyW74wg%2FFFHIKbCvJMyKyl5N%2B7Iv6P046Z8TAYAhnYHwQ3I%2FaxNlU52fyWCq8FQ%3D%3D; hng=CN%7Czh-CN%7CCNY%7C156; x=e%3D1%26p%3D*%26s%3D0%26c%3D0%26f%3D0%26g%3D0%26t%3D0; miid=374127122081873098; v=0; t=643574ecf918da2f3deafbfb77874196; cookie2=11997510615e192d0ce4bfe81d228ba2; _tb_token_=e85e170df53d7; alitrackid=www.taobao.com; lastalitrackid=www.taobao.com; JSESSIONID=052B40B78F375AE49686595F78ED6957; l=cBrYfBbrvnV-ct4BBOCgquI8mf7tIIRAguPRwEivi_5Qa6L6pMbOkDwuMFp6cjWdteLB4keiqTy9-etkiKy06Pt-g3fP.; isg=BKys-oQV3VJ4NMjoRirOQNIlfYoezTR4Qz5wVgbtvtf6EUwbLnFnn_GjMZkMmYhn'}
    try:
        r = requests.get(url, headers=headersmsg, timeout=30)
        print('状态码：' + str(r.status_code))
        print(r.request.url)
        r.raise_for_status()  # raise HTTPError for non-2xx responses
        # Use the encoding sniffed from the body, not the header default.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        # Only network / HTTP failures are expected here; report and
        # fall through to the implicit None return.
        print("运行时遇到错误：", e)


def getitemlist(html, ulist):
    """Extract (price, title) pairs from a Taobao search-result page.

    The listing data is embedded in the page as JSON-like
    ``"view_price":"..."`` and ``"raw_title":"..."`` fields; regex capture
    groups pull the values out directly.  This replaces the original
    ``eval()``-based unquoting, which both executed text derived from an
    untrusted page and broke on titles containing backslashes.

    :param html: raw HTML text of the search page
    :param ulist: list that ``[price, title]`` entries are appended to
        (mutated in place)
    :return: ``ulist``, for convenience
    """
    prices = re.findall(r'"view_price":"([\d.]*)"', html)
    titles = re.findall(r'"raw_title":"(.*?)"', html)
    # zip() stops at the shorter list, so a count mismatch between the two
    # field sets can no longer raise IndexError.
    for price, title in zip(prices, titles):
        ulist.append([price, title])
    return ulist


def printinfo(ilt):
    """Print the collected goods as a numbered price/title table.

    :param ilt: iterable of ``[price, title]`` pairs
    """
    row = "{:4}\t{:8}\t{:16}"
    print(row.format("序号", "价格", "商品名称"))
    for idx, item in enumerate(ilt, start=1):
        print(row.format(idx, item[0], item[1]))


def main():
    """Prompt for a keyword, crawl the first pages of Taobao search
    results, and print the collected price/title table.
    """
    kw = input('请输入关键词：')  # input() already returns str
    goods = urlcode(kw)  # percent-encode the keyword for the URL
    pagenum = 2  # number of result pages to crawl
    url = ('https://s.taobao.com/search?initiative_id=tbindexz_20170306&ie=utf8&spm=a21bo.2017.201856-taobao-item.2&sourceId=tb.index&search_type=item&ssid=s5-e&commend=all&imgfile=&'
           'q=' + goods +
           '&suggest=0_1&_input_charset=utf-8&wq=hongloum&suggest_query=hongloum&source=suggest')
    infolist = []

    for i in range(pagenum):
        try:
            # Each page advances the 's' offset by 44 items.
            realurl = url + '&s=' + str(44 * i)
            html = getHTMLtext(realurl)
            getitemlist(html, infolist)
        except Exception:
            # A failed page (fetch or parse) is skipped; the rest of the
            # pages are still collected.  Exception (not BaseException) so
            # Ctrl-C still interrupts the crawl.
            continue
    printinfo(infolist)


# Guard the entry point so importing this module does not start a crawl.
if __name__ == '__main__':
    main()