from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from time import sleep

from pymongo import MongoClient

from config import *
from tongcheng.channel_extract import *

# mobile
import requests

# NOTE(review): "broswer" is a typo for "browser"; kept as-is because the name
# is referenced throughout this module. PhantomJS support has been removed from
# modern Selenium — consider headless Chrome/Firefox when upgrading.
broswer = webdriver.PhantomJS(service_args=SERVICE_ARGS)

# Explicit wait: poll up to 10 seconds for expected conditions.
wait = WebDriverWait(broswer, 10)

# MongoDB connection; database/collection names come from config.py (star import).
client = MongoClient('localhost', 27017)
db = client[MONGO_DB_58]
channel_info = db[MONGO_TABLE_TONGCHENG_CHANNELS]  # stores scraped listing URLs


def get_urls_mobile_from(url, pages):
    """Fetch one page of post links for a category via the mobile-site endpoint.

    For every post link found, scrape its PC-site detail page and save the
    link to the channel collection.

    :param url: category base URL (mobile site, e.g. 'http://m.58.com/...')
    :param pages: page number appended as '/pnN/'
    """
    list_view = '{}/pn{}/'.format(url, pages)
    html = requests.get(list_view)
    soup = BeautifulSoup(html.text, 'lxml')

    # BUGFIX: soup.find('.nobg') looked for a *tag* literally named ".nobg"
    # (which never exists), so this empty-page guard could never fire.
    # select_one() treats the argument as a CSS selector, as intended.
    if soup.select_one('.nobg'):   # no data on this page — stop
        return

    # renamed from `list` to avoid shadowing the builtin
    anchors = soup.select('.asynInfo .zhuanzhuan > li > a')
    for li in anchors:
        href = li.get('href').split('?')[0]

        data = {
            'href': href
        }

        # the mobile URL maps to the PC URL by dropping the 'm.' prefix
        url_pc = href.replace('m.', '')
        if url_pc == 'http://jump.zhineng.58.com/jump':
            continue   # redirect placeholder, not a real listing
        parse_detail_from(url_pc)
        save_channel_to_mongo(data)
        sleep(1)   # throttle to reduce the chance of triggering the captcha

def get_details():
    """Re-scrape the PC detail page for every saved channel link in Mongo."""
    for record in channel_info.find():
        # mobile URL -> PC URL by dropping the 'm.' prefix
        pc_url = record['href'].replace('m.', '')
        # skip the redirect placeholder — it is not a real listing
        if pc_url == 'http://jump.zhineng.58.com/jump':
            continue
        parse_detail_from(pc_url)
        sleep(2)

def parse_detail_from(url, retries=2):
    """Scrape title/views/area/categories from a PC detail page and save them.

    :param url: PC-site detail page URL
    :param retries: remaining retry attempts on a network failure
                    (new optional parameter; default keeps old call sites working)
    """
    try:
        # BUGFIX: the old code caught selenium's TimeoutException, which
        # requests.get() can never raise — the handler was dead code, and had
        # it fired it would have recursed without bound. Also added a request
        # timeout so a stalled server cannot hang the scraper forever.
        html = requests.get(url, timeout=10)
    except requests.exceptions.RequestException:
        if retries > 0:
            parse_detail_from(url, retries - 1)
        return

    soup = BeautifulSoup(html.text, 'lxml')

    title_tag = soup.find('title')          # hoisted: was looked up twice
    page_title = title_tag.get_text() if title_tag else ''
    print(page_title)
    if page_title == '请输入验证码':
        # Anti-scraping captcha page: the selectors below would raise
        # IndexError, so report and bail out instead of crashing.
        print('操作太频繁了,需要用ip代理了')
        return

    title = soup.select('.info_titile')[0].get_text()
    view = soup.select('.look_time')[0].get_text()[0:-3]   # strip trailing 3-char unit text
    area = soup.select('div.palce_li > span > i')[0].get_text().split('-')

    # 'city-district' -> keep the district; a bare name -> keep it; else unknown
    if len(area) == 2:
        area = area[1]
    elif len(area) == 1:
        area = area[0]
    else:
        area = '不明'

    # breadcrumb strings form the category path
    cate = list(soup.select('div.breadCrumb.f12')[0].stripped_strings)

    good = {
        'url': url,
        'title': title,
        'view': view,
        'area': area,
        'cate': cate
    }

    save_goods_to_mongo(good)

def parse_url(url, pages, who_sells=0):
    """Load a PC listing page in the browser and harvest its post links.

    :param url: listing base URL
    :param pages: page number appended as '/pnN/'
    :param who_sells: seller-type path segment (0 = personal seller)
    """
    list_view = '{}{}/pn{}/'.format(url, who_sells, pages)
    broswer.get(list_view)
    try:
        wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#infolist')))
        web_data = broswer.page_source
        get_urls_from(web_data)
    except TimeoutException:
        # BUGFIX: the retry used to call parse_url(url) without the required
        # `pages` argument, raising TypeError instead of actually retrying.
        parse_url(url, pages, who_sells)

def get_urls_from(web_data):
    """Extract post links from a rendered listing page and persist each one.

    :param web_data: full HTML source of a listing page
    """
    soup = BeautifulSoup(web_data, 'lxml')

    # a <td class="t"> cell only appears while there are listings left;
    # its absence means we are past the last page
    if not soup.find('td', 't'):
        return

    for anchor in soup.select('a.t'):
        clean_url = anchor.get('href').split('?')[0]

        # crude filter for system-recommended / recycling entries, whose
        # URLs end in 'jump'  TODO: improve
        if clean_url.split('/')[-1] == 'jump':
            continue

        data = {
            'title': anchor.get_text(),
            'url': clean_url
        }
        get_detail(data['title'], data['url'])
        save_channel_to_mongo(data)

def next_page(page_number):
    """Click the 'next page' control, wait for the list, and scrape it.

    Retries itself with the same page number on timeout.
    """
    print('正在翻页', page_number)
    try:
        next_link = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, 'a.next > span')))
        next_link.click()
        wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#infolist')))
        get_urls_from(broswer.page_source)
    except TimeoutException:
        next_page(page_number)

def get_detail(title, url):
    """Scrape price/views/address/categories from a PC detail page and save them.

    :param title: listing title (already known from the listing page)
    :param url: PC-site detail page URL
    """
    html = requests.get(url)
    soup = BeautifulSoup(html.text, 'lxml')

    # (removed a redundant `title = title` self-assignment)
    price = soup.select('div.price_li > span > i')[0].get_text()
    views = soup.select('span.look_time')[0].get_text()[:-3]   # strip trailing 3-char unit text
    address = soup.select('div.palce_li > span > i')[0].get_text()
    cate = list(soup.select('div.biaoqian_li')[0].stripped_strings)

    goods = {
        'title': title,
        'price': price,
        'address': address,
        'views': views,
        'cate': cate
    }

    save_goods_to_mongo(goods)

def save_goods_to_mongo(data):
    """Insert one goods record; log the outcome instead of raising."""
    try:
        inserted = db[MONGO_TABLE_58].insert(data)
        if inserted:
            print('插入MONGODB成功', data)
    except Exception:
        print('插入MONGODB失败', data)

def save_channel_to_mongo(data):
    """Insert one channel-link record; log the outcome instead of raising."""
    try:
        result = channel_info.insert(data)
        if result:
            print('插入MONGODB成功', data)
    except Exception as err:
        print(err)
        print('插入MONGODB失败', data)

def main():
    """Walk every channel, scraping the first 8 mobile listing pages of each."""
    try:
        for channel in get_channel_list():
            print(channel)
            for page in range(8):
                get_urls_mobile_from(channel, page)
                sleep(2)
            sleep(2)
    except Exception as err:
        print(err)
        print('出错啦!')


if __name__ == '__main__':
    main()