#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Project: spd-sxmcc
"""
@author: lyndon
@time Created on 2018/12/24 13:59
@desc
"""

import os
import requests
from bs4 import BeautifulSoup

# Browser-like User-Agent string so the site serves normal HTML pages.
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
headers = {"User-Agent": user_agent}  # request headers; a plain dict passed to requests.get
url_pre = 'https://ty.5i5j.com'  # site root (only referenced by the commented-out detail-page code below)

# Scrape apartment-community listings from ty.5i5j.com (Taiyuan) and write one
# pipe-delimited line per community to the output file: name|href|detail-fields.
# Uses the module-level `headers` dict; `requests` / `BeautifulSoup` are
# imported at the top of the file.
#
# Open the output with an explicit encoding and write str directly -- the
# original encode()/decode() round-trips were Python-2 leftovers and raise
# TypeError on Python 3 (bytes written to a text-mode file).
with open(r'D:\iProject\myPython\com\teradata\laccelllatitude\wowj_target2.txt',
          'w', encoding='utf-8') as w:
    url = "https://ty.5i5j.com/xiaoqu/n{}"  # Taiyuan listing pages, paginated
    for page in range(1, 500):
        try:
            res = requests.get(url.format(page), headers=headers, timeout=10)
            res.raise_for_status()
        except requests.RequestException as exc:
            # Skip a failed page instead of aborting the whole multi-hour run.
            print('page {} failed: {}'.format(page, exc))
            continue
        res.encoding = "utf-8"  # force the encoding to avoid mojibake
        soup = BeautifulSoup(res.text, "lxml")
        listings = soup.select('.listCon')
        if not listings:
            # Past the last real page -- stop instead of requesting empty pages.
            break
        for li in listings:
            name = li.select_one('.listTit').text.strip()
            href = li.select_one(".listTit a").get('href')
            # ''.join(text.split()) strips ALL whitespace (\r, \n, spaces);
            # the original .replace('\s+', '') was a literal-string no-op,
            # not a regex, and the bytes round-trip broke on Python 3.
            details = [''.join(p.text.split()) for p in li.select(".listX p")]

            print(name)
            print(href)
            print(details)

            w.write('|'.join([name, href] + details) + '\n')
            w.flush()  # persist progress in case the long run is interrupted