#coding:utf-8
from bs4 import BeautifulSoup
import requests
import time
import pymongo

# MongoDB connection and the two collections shared by both spiders:
#   url_list  - item-detail URLs harvested by get_links_from (spider 1)
#   item_info - parsed item documents saved by get_item_info (spider 2)
client = pymongo.MongoClient('localhost', 27017)
ceshi = client['ceshi']
url_list = ceshi['url_list3']
item_info = ceshi['item_info']

# spider 1

def get_links_from(channel, pages, who_sells=0):
    """Scrape one listing page of a 58.com channel and store its item links.

    Builds a URL of the form http://bj.58.com/diannao/0/pn2/ from the
    channel base, the seller type and the page number, then inserts every
    item-detail link found on that page into the ``url_list`` collection.

    Args:
        channel: Channel base URL, e.g. 'http://bj.58.com/diannao/'.
        pages: Page number to fetch (rendered as the /pnN/ path segment).
        who_sells: Seller-type path segment; 0 is the site's default.
    """
    link_view = '{}{}/pn{}/'.format(channel, who_sells, pages)
    # timeout so a stalled server cannot hang the spider forever
    wb_data = requests.get(link_view, timeout=10)
    time.sleep(1)  # be polite: throttle between page fetches
    soup = BeautifulSoup(wb_data.text, 'lxml')
    # listing pages always contain <td class="t"> cells; their absence
    # means we have paged past the last page of results
    if soup.find('td', 't'):
        for link in soup.select('td.t a.t'):
            href = link.get('href')
            if not href:
                continue  # skip anchors without a target
            item_link = href.split('?')[0]  # strip the tracking query string
            url_list.insert_one({'url': item_link})
            print(item_link)

# spider 2
def get_item_info(url):
    """Fetch a 58.com item-detail page and store its fields in MongoDB.

    Parses the title, image URLs, price, post date and area from the page
    and inserts one document into the ``item_info`` collection.  Pages that
    have been replaced by the site's "page does not exist" placeholder are
    skipped silently.

    Args:
        url: Absolute URL of the item-detail page.
    """
    wb_data = requests.get(url, timeout=10)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    time.sleep(1)  # throttle between requests
    # The placeholder page for removed items carries a <meta> tag whose
    # content attribute is exactly this message; its presence means the
    # item is gone.  (The old check dereferenced soup.find('meta') without
    # a None guard and only ever looked at the first meta tag.)
    if soup.find('meta', content='您访问的页面不存在') is not None:
        return
    title = soup.title.text if soup.title else None  # guard: page may lack <title>
    imgs = soup.select('div.g_thumb_main > ul > li > img')
    print(imgs)
    # Collect each thumbnail's rel attribute (the full-size image URL).
    # NOTE(review): bs4 treats rel as a multi-valued attribute and may
    # return a list, so normalise it before concatenating.
    rels = []
    for img in imgs:
        rel = img.get('rel')
        if rel is None:
            continue
        if isinstance(rel, list):
            rel = ' '.join(rel)
        print(rel)
        rels.append(rel)
    # The original code prepended each rel, producing reverse order with a
    # trailing space; keep that format for compatibility with existing docs.
    imgList = ''.join(r + ' ' for r in reversed(rels))
    price = soup.select('span.price_now')[0].text if soup.find_all('span', 'price_now') else None
    date = soup.select('.time')[0].text if soup.find_all('li', 'time') else None
    area = soup.select('div.palce_li > span > i')[0].text if soup.find_all('div', 'palce_li') else None
    item_info.insert_one({'title': title, 'img': imgList, 'price': price, 'date': date, 'area': area})
    print({'title': title, 'img': imgList, 'price': price, 'date': date, 'area': area})

#get_item_info('http://bj.58.com/pingbandiannao/30469042246069x.shtml')
# url = 'http://bj.58.com/shouji/24605954621114x.shtml'
# wb_data = requests.get(url)
# wb_data.encoding = "utf-8"  # fix garbled (mojibake) text
# soup = BeautifulSoup(wb_data.text, 'lxml')
# no_longer_exist = '您访问的页面不存在' in soup.find('meta', content='您访问的页面不存在').get('content')

#get_links_from('http://bj.58.com/sanlunche/',2)