from bs4 import BeautifulSoup  # BeautifulSoup for parsing HTML
import requests  # requests for fetching web pages
import pymongo  # pymongo for talking to MongoDB
import time

### page_parsing: use MongoDB to quickly store and look up the listing info we need
##  Date: 2017-10-17

### Create the MongoDB database and collections
client = pymongo.MongoClient("localhost", 27017)  # connect to a local MongoDB instance
tongcheng = client["tongcheng"]  # database for 58.com ("58 tongcheng") listings
url_list = tongcheng["url_list"]  # collection of listing-page URLs gathered by get_urls()
item_info = tongcheng["item_info"]  # collection of parsed listing details saved by get_info()

## Collect listing links from one channel results page
def get_urls(channel, pages, who_sells=0):
    """Fetch one paginated listing page and store every item URL in MongoDB.

    channel   -- base channel URL ending with '/', e.g. 'http://hf.58.com/diannao/'
    pages     -- page number, appended as 'pn{pages}'
    who_sells -- seller-type path segment between channel and page (default 0)

    Resulting URL looks like: http://hf.58.com/diannao/0/pn3/
    """
    list_view = '{}{}/pn{}'.format(channel, who_sells, pages)
    web_data = requests.get(list_view)
    time.sleep(1)  # throttle so we don't hammer the server
    soup = BeautifulSoup(web_data.text, 'lxml')
    # A "no info" marker means we have paged past the last page with results.
    # NOTE(review): find("noinfotishi") searches for a *tag* named noinfotishi —
    # confirm the page really uses such a tag rather than a class name.
    if soup.find("noinfotishi"):
        return
    for link in soup.select("td.t > a.t"):
        item_link = link.get('href').split('?')[0]  # drop the tracking query string
        if "jump" in item_link:
            continue  # redirect links cannot be parsed yet; skip them
        url_list.insert_one({'url': item_link})  # save the URL for later scraping
        print(item_link)

### Scrape the details we need from a single listing page
def get_info(url):
    """Download one listing page and save title/price/date/area to MongoDB."""
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'lxml')
    # Pull each field out of the page, then persist them in one insert.
    record = {
        "title": soup.title.text,
        "price": soup.select("span.price.c_f50")[0].text.strip().strip(" 元"),
        "date": soup.select(".time")[0].text.strip(" 发布"),
        "area": soup.select("div.su_con > a")[0].text.strip(),
    }
    item_info.insert_one(record)

# Channels to crawl. Bug fix: the original wrapped the triple-quoted literal in
# [ ... ], producing a ONE-element list holding the whole newline-joined blob;
# .split() turns the literal into an actual list of four channel URLs.
channel_list = '''
http://hf.58.com/yishu/
http://hf.58.com/tushu/
http://hf.58.com/bangong/
http://hf.58.com/chengren/
'''.split()

# get_urls("http://hf.58.com/bangong/",0,1)
# url = "http://hf.58.com/bangong/31716758934088x.shtml"
# get_info(url)