# -*-coding:utf-8 -*-
"""Scrape paginated job listings and store them in a MongoDB database."""

# Step 1: identify the target page URLs and note what stays the same /
# changes between pages, e.g.:
# http://www.yingjiesheng.com/commend-fulltime-1.html
import pymongo
import time
import requests
from lxml import etree

# Step 2: configure the MongoDB connection (default port 27017 on localhost).
client = pymongo.MongoClient('localhost')
# Select the database named 'scau'.
db = client['scau']
# Name of the collection (table) that will receive the scraped records.
index = 'jobinfo'


# Step 3: the scraper.
def getPageInfo():
    """Scrape pages 1-2 of the full-time job listings and insert each row
    into the ``jobinfo`` collection of the ``scau`` database.

    Each stored document has the keys: name (company), job (job title),
    address, publist_Date (publication date).  Rows whose cells do not
    match the expected 2+2 layout are printed and skipped.
    """
    # Template URL; '{}' is the placeholder for the page number.
    url_template = 'http://www.yingjiesheng.com/commend-fulltime-{}.html'
    for page in range(1, 3):  # pages 1 and 2
        # BUG FIX: format into a new variable instead of overwriting the
        # template — the original `url = url.format(i)` destroyed the '{}'
        # placeholder after page 1, so every later iteration re-fetched
        # the same page-1 URL.
        page_url = url_template.format(page)
        # Timeout so a stalled server cannot hang the scraper forever.
        response = requests.get(url=page_url, timeout=10)
        # The site serves GBK-encoded pages; decode accordingly.
        response.encoding = 'gbk'
        tree = etree.HTML(response.text)
        # One <tr> per job listing (avoid shadowing the builtin `list`).
        rows = tree.xpath("//table[@class='jobul']/tr")
        for row in rows:
            text1 = row.xpath("./td/a/text()")   # [company, job title]
            text2 = row.xpath("./td/span/text()")  # [address, publish date]
            if len(text1) != 2 or len(text2) != 2:
                # Malformed row (e.g. trailing/footer rows) — log and skip.
                print(text1, text2)
                continue
            # Package the row as a document and store it.
            add_info = {
                'name': text1[0],
                'job': text1[1],
                'address': text2[0],
                'publist_Date': text2[1]
            }
            # insert_one: Collection.insert() was removed in PyMongo 4;
            # insert_one exists since PyMongo 3.0.
            db[index].insert_one(add_info)
        time.sleep(2)  # be polite: pause 2 seconds between page requests


# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    getPageInfo()
