#coding:utf-8
import random
import time

from bs4 import BeautifulSoup
import requests

from util.common_util import print_line, get_now_time
from util.mongodb_client import set_mongodb_connect
from constants.url_58 import dataBaseInfo, itemLinkSheet

'''
url = 'http://bj.58.com/pbdn/0/pn'

headers = {
    'Cookie': 'f=n; ipcity=sz%7C%u6DF1%u5733; userid360_xml=EB73F4126BFB8E2B08C4DF130A963B19; time_create=1519089974253; myfeet_tooltip=end; sessionid=14cb9943-f4b2-474a-8828-101d617ded72; id58=c5/njVpj7DU8b+cjCoE0Ag==; commontopbar_new_city_info=1%7C%E5%8C%97%E4%BA%AC%7Cbj; 58tj_uuid=b4b04626-26f7-4c9d-98eb-965f6beba064; new_session=1; new_uv=1; utm_source=; spm=; init_refer=; f=n; als=0; xxzl_deviceid=f7ncf14HU5JgLB4Da46OFpOfiJMqrOIrF8yZiskND66PPrnEkr%2BbbAwMLuK%2Bq4ag',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36'
}
'''


def get_page(url, headers):
    """Fetch one 58.com listing page and persist every item link found.

    Parameters:
        url: full listing-page URL (e.g. 'http://bj.58.com/pbdn/0/pn3').
        headers: dict of HTTP headers (Cookie / User-Agent) for requests.get.

    Returns:
        -1 when the page shows the "no info" placeholder (past the last
        page of results); otherwise the list of dicts that were inserted
        into the item-link collection.
    """
    print_line("get_page")
    print("get_page, url:{}".format(url))
    response = requests.get(url, headers=headers)

    soup = BeautifulSoup(response.text, 'lxml')

    # select() always returns a list (never None), so truthiness is the
    # whole check: a non-empty match means the "no more results" banner.
    noinfo = soup.select('#infolist > div.noinfo.aaa > div.noinfotishi')
    if noinfo:
        print('noinfo is not None and len(noinfo) != 0 , url:{}, noinfo:{}'.format(url, noinfo))
        return -1

    links = soup.select('div[class="infocon"] > table > tbody > tr > td.t > a')
    categorys = soup.select('body > div.nav > a:nth-of-type(3)')
    # Guard against a missing breadcrumb: indexing an empty result list
    # would raise IndexError and abort the whole page.
    category = categorys[0].get_text() if categorys else ''

    datas = []
    item_link = set_mongodb_connect(dataBaseInfo, itemLinkSheet)

    for link in links:
        # Take one timestamp per item so create_time == update_time on insert.
        now = get_now_time()
        data = {
            'link': link.get('href'),
            'category': category,
            'item_title': link.get_text(),
            'create_time': now,
            'update_time': now,
            'optimistic': 0
        }
        datas.append(data)
        if item_link is not None:
            item_link.insert_one(data)
    print("insert datas.length:{}".format(len(datas)))
    return datas


def get_zhuanzhuan_data(zhuanzhuan_url, headers):
    print_line("get_zhuanzhuan_data")
    print("get_zhuanzhuan_data, zhuanzhuan_url:{}".format(zhuanzhuan_url))
    time.sleep(random.uniform(0.1, 0.3))

    response = requests.get(zhuanzhuan_url, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')

    # 爬虫内容 标题，价格，区域，浏览量
    titles = soup.select('div.info_lubotu.clearfix > div.box_left_top > h1')
    prices = soup.select('div.info_massege.left > div.price_li > span > i')
    areas = soup.select('div.palce_li > span > i')
    view_nums = soup.select('div.box_left_top > p > span.look_time')

    for title, price, area, view_num in zip(
             titles, prices, areas, view_nums):
        if title.get_text() == '':
            continue

        if price.get_text().find("万") != -1:
            price = int(float(price.get_text().split('万')[0])*10000)
        else:
            price = int(price.get_text())
        data = {
            # 'category': category.get_text().split()[0],
            'title': title.get_text(),
            'price': price,
            'area': area.get_text(),
            'view_num': view_num.get_text()
        }
        return data

'''
for page in range(1,10):
    page_url = url + str(page)
    time.sleep(2)
    links = get_page(page_url, headers=headers)

    for link in links:
        zhuanzhuan_url = link.get('href')
        time.sleep(2)
        data = get_zhuanzhuan_data(zhuanzhuan_url, headers=headers)
        if data is not None:
            data['link'] = zhuanzhuan_url
            print(data)
'''