from bs4 import BeautifulSoup
import pymongo
import requests
import time
from channel_extract import channel_list

## Set up the MongoDB connection and the two collections this crawler uses.
client = pymongo.MongoClient("localhost", 27017)
ganji = client["ganji"]  # database name
url_list = ganji["url_list"] # collection of every listing URL to crawl
item_info = ganji["item_info"] # collection of the extracted listing details

## Collect every listing URL from one result page into the url_list collection.
def get_url_list(channel, page):
    """Fetch page *page* of *channel* and store each listing URL in MongoDB.

    channel: channel base URL, e.g. "https://3g.ganji.com/hf_jzxiaoshigong/"
    page:    1-based page number; result pages have the form <channel>o_<page>/
    """
    # Result-page URL form: https://3g.ganji.com/hf_jzxiaoshigong/o_4/
    start_url = channel + "o_{}/".format(page)
    prefix_url = "https://3g.ganji.com"
    headers = {
        "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Mobile Safari/537.36",
        "Connection": "keep-alive"
    }
    # A timeout prevents the crawler from hanging forever on a stalled connection.
    web_data = requests.get(start_url, headers=headers, timeout=10)
    web_data.encoding = "utf-8"
    soup = BeautifulSoup(web_data.text, "lxml")
    for item in soup.select("div.inforBox > a.infor"):
        # Drop the query string so each listing has one canonical URL.
        url = prefix_url + item["href"].split("?")[0]
        # Upsert keyed on the URL so re-crawling a page never stores
        # duplicates. (The original `try/.../finally: pass` handled no
        # exception at all, and plain insert_one duplicated URLs on reruns.)
        url_list.update_one({"url": url}, {"$set": {"url": url}}, upsert=True)

### Scrape the detail page at *url* and store its fields in item_info.
def get_item_info(url):
    """Download one listing detail page and save title / price / job fields.

    Pages that lack the expected markup (removed listings, captcha or
    error pages) are skipped instead of aborting the whole crawl.
    TODO: the original comment says "first check whether this URL was
    already downloaded" — that check is still not implemented here.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Mobile Safari/537.36",
        "Connection": "keep-alive"
    }
    # A timeout prevents one dead URL from stalling the crawler forever.
    web_data = requests.get(url, headers=headers, timeout=10)
    web_data.encoding = "utf-8"
    soup = BeautifulSoup(web_data.text, "lxml")
    try:
        title = soup.select("#detail_info > div.detail-p01 > h1.title")[0].text.strip()
        price = soup.select("#detail_info > div.detail-p01 > div.fubt.f12.clear.detail-desc > div.fl.fc-red")[
            0].text.strip()
        table_content = soup.find("table", attrs={"class": "table"})
        job_content = [td.text.strip() for td in table_content.findAll("td")]
        item_info.insert_one({
            "url": url,
            "title": title,
            "price": price,
            "job_type": job_content[0],
            "requirement": list(job_content[1].split()),
            "job_place": job_content[2]
        }
        )
    except (IndexError, AttributeError):
        # IndexError: a CSS selector matched nothing; AttributeError:
        # find() returned None. The original `finally: pass` swallowed
        # neither, so any malformed page crashed the crawl loop.
        pass

channel_list = list(set(channel_list.split()))


# test_item_url ="https://3g.ganji.com/hf_jzjiuba/2805526795x"
# get_item_info(test_item_url)