#!/usr/bin/env python3

import os
import sys
from bs4 import BeautifulSoup
import requests
import time

urls = ['http://bj.58.com/pbdn/0/pn{}'.format(str(i)) for i in range(1, 51)]


def get_infoid(url):
    """Extract the numeric info id from a goods detail URL.

    e.g. 'http://bj.58.com/pingbandiannao/26156183957822x.shtml'
    yields '26156183957822' — the trailing origin-flag character
    ('x' or 'z') is dropped.
    """
    last_segment = url.rsplit('/', 1)[-1]
    stem = last_segment.partition('.')[0]
    return stem[:-1]


def get_goods_view(url):
    """Fetch the page-view counter for a 58.com promotion listing.

    The view count is filled in by JS on the page; packet capture shows it is
    served by http://jst1.58.com/counter?infoid=<id>.  Experimentation showed
    only the ``infoid`` query parameter and a ``Referer`` request header are
    required.  The response body looks like::

        Counter58.userlist[0]={uid:'0',...};Counter58.total=5217

    where 5217 is the view count we want.  The infoid itself is embedded in
    the goods-page URL and parsed out with get_infoid().

    url: the goods detail page URL.
    Returns the view count as a string, or None when the request fails.
    """
    counter_url = 'http://jst1.58.com/counter?infoid=' + get_infoid(url)
    headers = {'Referer': url}  # the counter API only checks Referer
    try:
        webdata = requests.get(counter_url, headers=headers, timeout=5)
        webdata.raise_for_status()  # no-op on 200, raises on 4xx/5xx
    except requests.RequestException as e:  # bug fix: was `except Exception % e`
        print('get goods view error:', e)
        return None  # bug fix: original fell through to an unbound `webdata`

    # Everything after the last '=' is the Counter58.total value.
    return webdata.text[webdata.text.rfind('=') + 1:]


def get_zhuanzhuan_goods(url):
    """Scrape one 转转 (zhuanzhuan) second-hand goods detail page.

    url: the goods detail page URL.
    Returns a dict with name/cate/view/price/address/time/condition,
    or None when the page cannot be fetched.  Zhuanzhuan pages carry
    neither a publish time nor a condition field, so those are 'Unknown'.
    """
    try:
        webdata = requests.get(url, timeout=5)
        webdata.raise_for_status()  # no-op on 200, raises on 4xx/5xx
    except requests.RequestException as e:  # bug fix: was `except Exception % e`
        print('get zhuanzhuan goods error:', e)
        return None  # bug fix: original fell through to an unbound `webdata`

    soup = BeautifulSoup(webdata.text, 'lxml')

    name = soup.select('div.box_left_top > h1')
    cate = soup.select('span.crb_i')
    view = soup.select('span.look_time')
    price = soup.select('span.price_now')
    address = soup.select('div.palce_li')

    data = {
            'name': name[0].get_text(),
            'cate': cate[0].get_text().replace('\n', ''),  # category breadcrumb
            'view': view[0].get_text(),
            'price': price[0].get_text(),
            'address': address[0].get_text().replace('\n', ''),
            'time': 'Unknown',  # zhuanzhuan pages have no publish time
            'condition': "Unknown"  # ... nor a condition field
            }
    print("转转商品:\n  ", data)
    return data


def get_address(address):
    """Join the stripped text fragments of the first matched address tag.

    address: list of tags from a CSS select; may be empty.
    Returns '未知' ("unknown") when nothing matched.
    """
    if not address:
        return "未知"
    return ''.join(address[0].stripped_strings)


def get_promotion_goods(url):
    """Scrape one 精品推广 (promoted) goods detail page on 58.com.

    url: the goods detail page URL.
    Returns a dict with name/cate/view/price/address/time/condition,
    or None when the page cannot be fetched.  The view count comes from
    the separate counter API via get_goods_view().
    """
    try:
        webdata = requests.get(url, timeout=5)
        webdata.raise_for_status()  # no-op on 200, raises on 4xx/5xx
    except requests.RequestException as e:  # bug fix: was `except Exception % e`
        print('get promotion goods error:', e)
        return None  # bug fix: original fell through to an unbound `webdata`

    soup = BeautifulSoup(webdata.text, 'lxml')

    name = soup.select('div.col_sub.mainTitle > h1')
    cate = soup.select('span.crb_i')
    # In CSS selectors '#content' matches id="content", '.time' matches class="time".
    price = soup.select('#content span.price')
    address = soup.select('.c_25d')
    time = soup.select('.time')
    condition = soup.select('#content > div.person_add_top.no_ident_top > div.per_ad_left > div.col_sub.sumary > ul > li:nth-of-type(2) > div.su_con')

    data = {
            'name': name[0].get_text().replace('\n', ''),
            'cate': cate[0].get_text().replace('\n', ''),  # category breadcrumb
            'view': get_goods_view(url),
            'price': price[0].get_text() + '元',
            'address': get_address(address),
            'time': time[0].get_text(),
            'condition': list(condition[0].stripped_strings)[0]
            }
    print("精品推广:\n  ", data)
    return data


def get_jztg_goods_url(url):
    """Resolve the real goods URL behind a 精准推广 redirect link.

    requests is told not to follow redirects so the target can be read from
    the Location header ourselves — the real URL is needed later to feed the
    view-count API the correct info id.

    url: the redirecting promotion link.
    Returns the redirect target URL, or None on failure.
    """
    try:
        webdata = requests.get(url, timeout=5, allow_redirects=False)
        webdata.raise_for_status()  # 3xx does not raise; only 4xx/5xx do
    except requests.RequestException as e:  # bug fix: was `except Exception % e`
        print('get jztg goods url error:', e)
        return None  # bug fix: original fell through to an unbound `webdata`

    # .get avoids a KeyError if the server unexpectedly answered without
    # a Location header (e.g. a direct 200).
    return webdata.headers.get('Location')


def get_goods_urls(url):
    """Collect goods links from one listing page.

    Anchors with rel="nofollow" are 精准推广 (promoted) entries — their href
    is a redirect that is resolved immediately via get_jztg_goods_url() so
    the view-count API can later extract the real info id.  Anchors without
    a rel attribute are 转转 (zhuanzhuan) items.

    url: a listing page URL.
    Returns a list of {'origin': ..., 'url': ...} dicts (empty on failure).
    """
    goods = []
    try:
        webdata = requests.get(url, timeout=5)
        webdata.raise_for_status()  # no-op on 200, raises on 4xx/5xx
    except requests.RequestException as e:  # bug fix: was `except Exception % e`
        print('get goods urls error:', e)
        return goods  # bug fix: original fell through to an unbound `webdata`

    soup = BeautifulSoup(webdata.text, 'lxml')
    for goods_url in soup.select('td.t > a.t'):
        rel = goods_url.get('rel')
        if rel == ['nofollow']:
            goods.append({
                    'origin': '精准推广',
                    'url': get_jztg_goods_url(goods_url.get('href'))
                    })
        elif rel is None:
            goods.append({
                    'origin': '转转',
                    'url': goods_url.get('href')
                    })
    return goods

"""
def get_origin_flag(url):
    return url[url.find('.shtml') - 1]

def get_goods_origin(url):
    flag = get_origin_flag(url)
    print(url)
    print(flag)
    if flag == 'z':
        return '转转'
    elif flag == 'x':
        return '精准推广'
    else:
        return '未知'
"""

def main():
    """Walk every listing page and scrape each goods item found on it."""
    for page_url in urls:
        for item in get_goods_urls(page_url):
            origin = item['origin']
            if origin == '转转':
                get_zhuanzhuan_goods(item['url'])
            elif origin == '精准推广':
                get_promotion_goods(item['url'])
            else:
                print("Unknown goods origin")
            time.sleep(1)  # throttle so we don't hammer the server


if __name__ == '__main__':
    main()

