#!/usr/bin/env python3

import os
import sys
import time
import pymongo
import requests
from bs4 import BeautifulSoup

# 58同城二手市场：http://bj.58.com/sale.shtml

# MongoDB connection and the two collections this scraper writes to.
client = pymongo.MongoClient('localhost', 27017)
shoujihao = client['shoujihao']
# NOTE(review): `list` shadows the builtin `list` for the rest of this
# module — renaming it would require updating every use below.
list = shoujihao['list']  # listing entries: {'title', 'url'}
info = shoujihao['info']  # per-item details scraped from each listing url


def get_shoujihao_from_page(page = 1):
    """Scrape one listing page of phone numbers into the `list` collection.

    Returns the number of items inserted, -1 when `page` is invalid or the
    page reports zero results (end of the listings), and 0 on a fetch error
    so the caller can skip this page and continue.
    """
    if page < 1:
        return -1
    shoujihao_page = 'http://bj.58.com/shoujihao/pn' + str(page)

    try:
        webdata = requests.get(shoujihao_page)
        # raise_for_status() is a no-op on 2xx, so call it unconditionally.
        webdata.raise_for_status()
    except Exception as e:  # was `except Exception % e` — blew up at runtime
        print("error while get shoujihao from", shoujihao_page, e)
        return 0

    soup = BeautifulSoup(webdata.text, 'lxml')
    cnt = soup.select('span b')
    shoujihao_titles = soup.select('strong.number')
    shoujihao_urls = soup.select('a.t')

    # Guard against a missing counter element as well as a zero count
    # (the original `cnt[0]` raised IndexError on an unexpected layout).
    if not cnt or int(cnt[0].get_text()) == 0:
        return -1

    # Ignore promoted ("精准推广 / 靓号优选") items that appear only on page 1.
    if page == 1:
        jztgs = soup.select('span.jz-title')
        ignore_cnt = len(jztgs)
        shoujihao_titles = shoujihao_titles[ignore_cnt:]
        shoujihao_urls = shoujihao_urls[ignore_cnt:]

    idx = 0
    for title, url in zip(shoujihao_titles, shoujihao_urls):
        data = {
                'title' : title.get_text(),
                'url' : url.get('href')
                }
        idx += 1
        print("#%d-%d insert" % (page, idx), data)
        list.insert_one(data)
    return idx


def get_infoid(url):
    """Extract the numeric info id preceding 'x.shtml' in a 58.com URL.

    e.g. 'http://bj.58.com/pingbandiannao/26156183957822x.shtml'
    yields '26156183957822'.

    Generalized from the original fixed 14-character slice: scans backward
    over digits, so ids of any length work. Returns '' when the URL has no
    'x.shtml' marker (the original produced a garbage slice in that case).
    """
    idx_end = url.find('x.shtml')
    if idx_end == -1:
        return ''
    idx_start = idx_end
    while idx_start > 0 and url[idx_start - 1].isdigit():
        idx_start -= 1
    return url[idx_start : idx_end]

def get_views(url):
    """Return the page-view counter (as a string) for a listing URL.

    Queries 58.com's counter service with the info id parsed from `url`;
    the Referer header is required by the counter endpoint. Returns '0'
    when the request fails (the original fell through and read an unbound
    `webdata`, raising NameError).
    """
    view_url_prefix = 'http://jst1.58.com/counter?infoid='
    headers = {
            'Referer': url
            }
    infoid = get_infoid(url)
    counter_url = view_url_prefix + infoid
    try:
        webdata = requests.get(counter_url, headers = headers, timeout = 5)
        # raise_for_status() is a no-op on 2xx, so call it unconditionally.
        webdata.raise_for_status()
    except Exception as e:  # was `except Exception % e` — blew up at runtime
        print('get goods view error:', e)
        return '0'

    # Response body looks like "...=<count>"; take everything after the
    # last '='.
    return webdata.text[webdata.text.rfind('=') + 1 : ]


def get_address(address):
    """Concatenate the text of seller-address tags into one string.

    `address` is a list of bs4 elements (anything exposing .get_text()).
    Returns "未知" ("unknown") when the list is empty.
    """
    if not address:
        return "未知"
    # str.join instead of the original `+=` loop — linear, idiomatic.
    return ''.join(a.get_text() for a in address)


def get_shoujihao_info_from(item):
    """Fetch the detail page for one listing and insert its data into `info`.

    `item` is a document from the `list` collection: {'title', 'url'}.
    Returns early (inserting nothing) when the detail page can't be fetched.
    """
    try:
        webdata = requests.get(item['url'], timeout = 5)
        # raise_for_status() is a no-op on 2xx, so call it unconditionally.
        webdata.raise_for_status()
    except Exception as e:  # was `Exception % e`; also printed undefined `url`
        print("error while get shoujihao info from", item['url'], e)
        return  # original fell through and used an unbound `webdata`

    soup = BeautifulSoup(webdata.text, 'lxml')

    # Price: '#content' matches id="content", 'span.price' a class.
    price = soup.select('#content span.price')

    # Seller address: '.su_con' matches class="su_con".
    address = soup.select('.su_con > a')

    # Publication time ('.time' matches class="time"). Renamed from `time`
    # so the stdlib `time` module used elsewhere is not shadowed.
    pub_time = soup.select('.time')

    # Seller name.
    seller = soup.select('#main li > a.tx')

    # Contact info.
    connection = soup.select('.f20.c_f50')

    data = {
            'title' : item['title'],
            'view' : get_views(item['url']),
            'price' : price[0].get_text().replace('\n', '').replace('\t', '').replace(' ', '').strip(),
            'address' : get_address(address),
            'time' : pub_time[0].get_text().strip(),
            'seller' : seller[0].get_text(),
            'connection' : connection[0].get_text().strip()
            }
    info.insert_one(data)


def main():
    """Crawl all listing pages into `list`, then scrape details for each item.

    Phase 1 pages through listings until get_shoujihao_from_page returns -1
    (end of listings); a 0 return is a transient fetch error and the loop
    simply moves on to the next page. Phase 2 visits every stored URL.
    """
    cnt = 0
    pn = 1
    while cnt != -1:
        cnt = get_shoujihao_from_page(pn)
        pn += 1
    # The original used `while/else`, but with no `break` the else branch
    # always runs — a plain statement after the loop says the same thing.
    print("%d pages parsed" % (pn - 1))

    cnt = 1
    for item in list.find():
        get_shoujihao_info_from(item)
        print('parse #%d: %s' % (cnt, item['title']))
        cnt += 1
        time.sleep(1)  # throttle so the site doesn't block us

if __name__ == '__main__':
    main()

