#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Project: spd-sxmcc
"""
@author: lyndon
@time Created on 2018/12/24 13:59
@desc
"""

import os
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.wait import WebDriverWait
from pyquery import PyQuery as pyq

user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
headers = {"User-Agent": user_agent}  # request headers for requests.get; a plain dict
url_pre = 'https://ty.5i5j.com'  # site base URL, prepended to relative listing links

# Spoof a desktop-Chrome User-Agent on the headless PhantomJS session so the
# site serves the regular desktop page.
# NOTE(review): PhantomJS support was removed in Selenium 4 — this setup only
# works against an older selenium release; confirm the pinned version.
params = DesiredCapabilities.PHANTOMJS
params['phantomjs.page.settings.userAgent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36'

# Shared module-level browser instance used by get_detail().
browser = webdriver.PhantomJS(desired_capabilities=params)


# soup2 = BeautifulSoup(res.text, "lxml")
# apartment['fangs'] = [s.text.encode('utf-8').replace('\r', '').replace('\n', '').replace('\s+', '').replace(' ', '') for s in soup2.select('li') if
#                     '房屋总数：' in s.encode('utf-8')]

def get_detail(url):
    """Fetch a community ("xiaoqu") detail page and print its '.xqfangs' info.

    Navigates the shared module-level PhantomJS ``browser`` to *url*, waits
    up to 10 seconds for the ``.xqfangs`` element to appear in the DOM, then
    extracts its text and prints it line by line (interleaved with numbered
    separator banners used for debugging).

    :param url: absolute URL of the detail page to scrape.
    :raises selenium.common.exceptions.TimeoutException:
        if the ``.xqfangs`` element does not appear within 10 seconds.
    """
    browser.get(url)
    # BUG FIX: the original code did `WebDriverWait(browser, 10)` without
    # calling .until(...), which constructs the waiter and waits for nothing.
    # Actually block until the target element is present before scraping.
    WebDriverWait(browser, 10).until(
        EC.presence_of_element_located((By.CLASS_NAME, 'xqfangs'))
    )
    pagesource = browser.page_source
    py_html = pyq(pagesource, parser='html')
    page = py_html('.xqfangs')

    print(page.text())
    print('=================1==================')
    # page.text() is already plain text, not markup; BeautifulSoup tolerates
    # it and this round-trip preserves the script's original output exactly.
    soup2 = BeautifulSoup(page.text(), 'lxml')
    tt = soup2.text
    print(tt)
    print(type(tt))

    print('==================2=================')
    # One entry per line of the info block (identity comprehension removed).
    infos = tt.split('\n')
    print(infos)
    print('=================3==================')

    for info in infos:
        print(info)
    print('=================4==================')


# with open(r'D:\iProject\myPython\com\teradata\laccelllatitude\wowj_target2.txt', 'r') as f:
#     user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
#     headers = {"User-Agent": user_agent}  # 请求头,headers是一个字典类型
#     url = "https://ty.5i5j.com/xiaoqu/n{}"  # 太原
#     for line in f:
#         url = line.split('|')[1]
#         url = url_pre + url
#         print(url)

# Entry point: scrape a single sample community detail page.
# NOTE(review): runs at import time; consider an `if __name__ == "__main__":`
# guard if this module is ever imported elsewhere.
url = 'https://ty.5i5j.com/xiaoqu/415471.html'
get_detail(url)


    # apartments = []
    # for i in range(1, 500):
    #     res = requests.get(url.format(i), headers=headers)
    #     res.encoding = "utf-8"  # 设置编码，防止乱码
    #     soup = BeautifulSoup(res.text, "lxml")
    #     lis = soup.select('.listCon')
        # apartments = []
        # for li in lis:
        #     apartment = {}
        #     apartment['name'] = li.select_one('.listTit').text.strip()
        #     apartment['url'] = li.select_one(".listTit a").get('href')
        #     apartment['p'] = [tx.text.encode('utf-8').replace('\r', '').replace('\n', '').replace('\s+', '').replace(' ', '') for tx in li.select(".listX p")]


            # url_detail = url_pre + apartment['url']
            # res = requests.get(url_detail, headers=headers)
            # res.encoding = "utf-8"  # 设置编码，防止乱码
            # soup2 = BeautifulSoup(res.text, "lxml")
            # apartment['fangs'] = [s.text.encode('utf-8').replace('\r', '').replace('\n', '').replace('\s+', '').replace(' ', '') for s in soup2.select('li') if
            #                     '房屋总数：' in s.encode('utf-8')]
            #

            # print(apartment['name'])
            # print(apartment['url'])
            # print(apartment['p'])
            # line = apartment['name'] + '|' + apartment['url']
            # for p in apartment['p']:
            #     line = line + '|' + p.decode('utf-8')

            # for fangs in apartment['fangs']:
            #     try:
            #         print(fangs)
            #         print(type(fangs))
            #         print(type(line))
            #         line = line + '|' + fangs.decode('utf-8')
            #     except:
            #         line = line + '|'

            # line = line + '\n'
            #
            # w.write(line.encode('utf-8'))
            # w.flush()