'''
通过前两步的准备,现在开始整个采集
'''

from bs4 import BeautifulSoup as BS
import os,xlwt,xlrd,traceback,codecs
from urllib import request
from xlrd import open_workbook
from xlutils.copy import copy
import requests

from lianjiademo.jesse.step1 import step1
from lianjiademo.jesse.step2 import step2
def start_(home_url):
    """Crawl every rental-detail page reachable from the listings home page.

    :param home_url: Lianjia (链家) rental listings home page URL,
        e.g. ``https://sz.lianjia.com/zufang/``
    """
    # Step 2: collect all category-B URLs (paginated listing pages).
    urlsb_ = step2(home_url)
    # Step 1: from every B-page, harvest the category-A URLs (detail pages).
    urlsa_ = []
    for _urlb in urlsb_:
        urlsa_.extend(step1(_urlb))
    # Deduplicate — the same detail page may appear on several listing pages.
    urlsa_ = list(set(urlsa_))
    # Finally scrape and persist each detail page.
    for url in urlsa_:
        get_content_(url)
    
def get_content_(url):
    """Scrape one rental-detail page and append the parsed record to result.txt.

    :param url: absolute URL of a Lianjia rental detail page
    :raises AttributeError: if the expected h1.main / div.price / div.zf-room
        elements are missing from the page (``find`` returns None)
    """
    result = {}
    # Close the HTTP response deterministically instead of leaking the socket.
    with request.urlopen(url) as resp:
        html = resp.read()
    soup = BS(html, "lxml")
    title = soup.find("h1", attrs={"class": "main"}).text
    price_div = soup.find("div", attrs={"class": "price"}).text
    price_div = price_div.replace("\n", "")
    info1 = soup.find("div", attrs={"class": "zf-room"}).find_all('p')
    result['标题'] = title
    result['价格1'] = price_div
    # Numeric amount: everything before the currency marker "元".
    result['价格2'] = price_div.split("元")[0]
    # Unit part: from "元" onwards; default to "元/月" when no marker exists.
    pm = lambda x: ("元" + x.split("元")[1]) if "元" in x else "元/月"
    result['价格3'] = pm(price_div)
    # Each <p> in the zf-room block is a "label：value" pair (full-width colon).
    for info1_ in info1:
        info1s = info1_.text.split("：")
        if len(info1s) == 2:
            result[info1s[0]] = info1s[1]
    write_content(result)
    
def write_content(result):
    """Append one scraped record to ``result.txt`` as a repr line (UTF-8).

    :param result: dict of scraped fields for a single detail page
    """
    # `with` guarantees the handle is closed even if the write raises;
    # the original open()/close() pair leaked the handle on error.
    with codecs.open("result.txt", "a", "utf-8") as f:
        f.write(str(result) + "\n")
    
# 测试
# start_("https://sz.lianjia.com/zufang/")
if __name__ == "__main__":
    # Ad-hoc connectivity check: fetch a single listing page through a
    # proxy with a browser User-Agent, then dump the parsed document.
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
    }
    response = requests.get(
        "https://sz.lianjia.com/zufang/pg7/",
        headers=header,
        timeout=3,
        proxies={"http": "http://183.32.89.65:808"},
    )
    # Echo the path actually requested (useful to spot proxy redirects).
    path_url = response.request.path_url
    print(path_url)
    soup = BS(response.text, "lxml")
    print(str(soup))


