import requests
from bs4 import BeautifulSoup
import json
import re
import time
import random
import os
# Site roots for the Hangzhou Lianjia second-hand-listing pages.
base_url = 'https://hz.lianjia.com'
# Local output directory for saved pages (user's Downloads folder).
base_dir = os.path.join(os.path.expanduser("~"), 'Downloads')
# Base URL of the second-hand-house detail pages.
detail_base_url = 'https://hz.lianjia.com/ershoufang/'

# Request headers for every fetch.
# NOTE(review): the Cookie value is a captured, logged-in session and will
# expire — refresh it before running the scraper.
# FIX: the Cookie literal was previously split across two physical lines
# without continuation (a SyntaxError); it is now two adjacent string
# literals that Python concatenates at compile time, content unchanged.
headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0',
    'Cookie': (
        'lianjia_uuid=e0a64d84-3b62-4c0d-bc4f-bc4e6775d22c; _ga=GA1.2.436289972.1737816470; lfrc_=fc220f33-f375-4eca-9e75-5a2d48bfb8de; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221949deee437164c-0594ea78cadd9c-7e433c49-1296000-1949deee4382e8b%22%2C%22%24device_id%22%3A%221949deee437164c-0594ea78cadd9c-7e433c49-1296000-1949deee4382e8b%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; crosSdkDT2019DeviceId=-ndyooo-fws840-p3bqidfs3jh8rns-cf1q4c16e; Hm_lvt_46bf127ac9b856df503ec2dbf942b67e=1737998094,1738384226,1738641665,1738811864; HMACCOUNT=F140DA9AB3D80D49; _gid=GA1.2.613992956.1738811876; hip=Oag_YNDbjSsLp6KOrV89XZDC341yIx6D-Dyfqo-JLFPyP6GfRfswcFHv2K2eXdw0iMn1AyyDNOCHMlDu6o96B9EDM2IDh6s8ueISK2GigZAV1OBj0rdkaAjwhq4idsBMdgNOtwxZsBMs77dx-mk30g1opeDUt_wuc3BSYqQaIxlBNuo_PVAQhNW5Rg%3D%3D; _ga_WLZSQZX7DE=GS1.2.1738856547.2.0.1738856547.0.0.0; _ga_TJZVFLS7KV=GS1.2.1738856547.2.0.1738856547.0.0.0; select_city=330100; lianjia_ssid=b6f4c585-eba0-4cc5-bf4d-4dc144416ba9; login_ucid=2000000464925119; lianjia_token=2.0010419a754614c62601ecb344348a5a6d; lianjia_token_secure=2.0010419a754614c62601ecb344348a5a6d; security_ticket=BRozp1P9Zxj06BpsWsdBkvhnUFcBexIrpF89Iqu+9/mibV76lggMA9Gu5R7OVU15TWL0iOXxZHyBF/rI6Oipoq0PN4JeWaqgdIR4EqUvqybLFgJoGCUENf5Q8jEynBOp4djOXz7UaXnyHsSuIPtDSYbedXODkR+gZlynqrvtVpk=; ftkrc_=fb199679-faf9-45d5-ae8d-3a93920813ab; _gat=1; _gat_past=1; _gat_global=1; _gat_new_global=1; _gat_dianpu_agent=1; '
        'srcid=eyJ0Ijoie1wiZGF0YVwiOlwiZTYyYWYyYzc5MDJlYmRiZDE5NjY4NGQ3NzI3ZjhjYzZjNDY3OWI0MGVkNTFkOTgyZGI2Mzg2M2UyNTI4ODJiMWMxZjM3NTBmOTNjODRkYTY2NGZhZmI0MTE3OWNlYjgxZDkxYjhhMGU1NDIwYmM1Zjg5MjhlYjdkY2RhNjg2MTgxNzc1NzM3ZmU0MjUwZjZmNTE3MDZiMGJjNDc4OGZjZWY1ZTViZTExNGQ0YWIxYTIwNGJiZWFiMTFhY2U1MTEzN2NmZDIyY2E1OTljNGRiMDNjYjNhYjdiN2ZlODdhNTY3ODE4NDNkNzhjNjc5ODg1ZGNjNjRhYWJjYjM2ZWZlNVwiLFwia2V5X2lkXCI6XCIxXCIsXCJzaWduXCI6XCJmMjdlZTRmY1wifSIsInIiOiJodHRwczovL2h6LmxpYW5qaWEuY29tL2Vyc2hvdWZhbmcvMTAzMTM2NjY4NzM0Lmh0bWwiLCJvcyI6IndlYiIsInYiOiIwLjEifQ==; _ga_1W6P4PWXJV=GS1.2.1738856374.43.1.1738857894.0.0.0; _ga_W9S66SNGYB=GS1.2.1738856374.43.1.1738857894.0.0.0; Hm_lpvt_46bf127ac9b856df503ec2dbf942b67e=1738857899'
    )
}
# Area filter tags (district slugs).
area_tag_list = ['dongzhou']
# Floor-level filter tags: low / middle / high floors.
house_location_tag_list = ['lc1','lc2','lc3']
# Bedroom-count filter tags: 1 to 5 bedrooms.
badroom_tag_list = [ 'l1', 'l2','l3','l4','l5']


# Build the list of listing-page URLs to crawl, splitting by bedroom
# count when one filter combination has too many results.
def get_less_3000_url_list():
    """Return listing URLs for every area/floor-level combination.

    When a combination reports more than 3500 listings, the URL is split
    further by bedroom count (l1..l5) so each result set stays small
    enough to page through completely.

    NOTE(review): the function name says 3000 but the threshold used is
    3500 — confirm which limit is intended.
    """
    urls = []
    for area in area_tag_list:
        for level in house_location_tag_list:
            temp_url = f'{base_url}/ershoufang/{area}/{level}/'
            text = get_page_text(temp_url)
            num = get_house_number(text)
            if num > 3500:
                # Lianjia concatenates filter tags directly in the path,
                # e.g. .../lc1l1/ — hence the rstrip before appending.
                for br in badroom_tag_list:
                    urls.append(temp_url.rstrip('/') + br + '/')
            else:
                urls.append(temp_url)
    return urls
    

# Extract the house-listing ids present on a result page.
def get_id_list(text):
    """Return the list of house ids found in *text* (listing-page HTML).

    Ids are read from the ``data-lj_action_housedel_id`` attribute of
    each ``<li>`` result item.
    """
    soup = BeautifulSoup(text, 'html.parser')
    items = soup.find_all('li', attrs={'data-lj_action_housedel_id': True})
    return [item['data-lj_action_housedel_id'] for item in items]

# Build the URL of every result page for the current listing query.
def get_all_page_url(text):
    """Return one URL per result page, pages 1..totalPage.

    The page's ``page-url`` attribute holds a template containing a
    literal ``{page}`` placeholder that is substituted per page.
    """
    # The original called get_page_info twice (one result discarded),
    # re-parsing the whole document needlessly; call it once.
    page_info = get_page_info(text)
    total_page = page_info['totalPage']
    soup = BeautifulSoup(text, 'html.parser')
    url_template = soup.find("div", attrs={'page-url': True})['page-url']
    url_list = []
    for page in range(1, total_page + 1):
        # The placeholder is a fixed literal token, so str.replace
        # suffices — no regex needed.
        url_list.append(url_template.replace('{page}', str(page)) + '/')
    return url_list

# Build the detail-page URL for a given house id.
def get_huose_info_url(text, id):
    """Return the detail URL for *id*, reusing the area path segment
    taken from the next-page URL of the current listing page.

    Returns None when no area segment can be extracted (previously this
    crashed with AttributeError when ``get_next_url`` returned the
    sentinel string 'none' on the last page, which contains no '/').
    """
    path = get_next_url(text)
    match = re.search(r'/([^/]+)', path)
    if match is None:
        return None
    area = match.group(1)
    return base_url + '/' + area + '/' + str(id) + '.html'





# Parse the pagination metadata embedded in a listing page.
def get_page_info(text):
    """Return the pagination dict (e.g. ``totalPage``, ``curPage``)
    stored as JSON in the page's ``page-data`` attribute."""
    pager_div = BeautifulSoup(text, 'html.parser').find(
        "div", attrs={'page-data': True})
    return json.loads(pager_div['page-data'])
    
# Compute the URL of the next result page.
def get_next_url(text):
    """Return the next page's URL, or the string 'none' when the
    current page is already the last one.

    The URL comes from the page's ``page-url`` template, whose literal
    ``{page}`` placeholder is replaced with the next page number.
    """
    parsed = BeautifulSoup(text, 'html.parser')
    template = parsed.find("div", attrs={'page-url': True})['page-url']
    info = get_page_info(text)
    next_page = info['curPage'] + 1
    if next_page > info['totalPage']:
        # Sentinel understood by callers; kept for compatibility.
        return 'none'
    return re.sub(r'\{page\}', str(next_page), template)


# Read the total listing count shown in the page header.
def get_house_number(text):
    """Return the number of listings reported by the page's
    'total fl' header, or 0 when that element is absent."""
    header = BeautifulSoup(text, 'html.parser').find('h2', class_='total fl')
    if not header:
        return 0
    return int(header.find('span').get_text(strip=True))
# Fetch a page and return its raw HTML.
def get_page_text(url, timeout=10):
    """GET *url* with the scraper's session headers and return the body.

    timeout: seconds before the request is aborted. Previously no
    timeout was set, so a stalled server could hang the scraper forever.
    """
    r = requests.get(url, headers=headers, timeout=timeout)
    return r.text

# Save a prettified copy of a page for offline inspection.
def save_text(url):
    """Fetch *url* and write its prettified HTML to
    ``<Downloads>/test-py-detail.txt``.

    Fixes: uses the portable ``base_dir`` instead of the hard-coded
    '/Users/xiatian/Downloads' path, and a context manager so the file
    handle is always closed (it previously leaked).
    """
    text = get_page_text(url)
    soup = BeautifulSoup(text, 'html.parser')
    out_path = os.path.join(base_dir, 'test-py-detail.txt')
    with open(out_path, 'w', encoding='utf-8') as f:
        f.write(soup.prettify())

# Sleep a random amount of time (request throttling).
def random_sleep(min_seconds, max_seconds):
    """Sleep a uniformly random number of seconds in
    [min_seconds, max_seconds], to space out requests.

    Parameters renamed from ``min``/``max`` so they no longer shadow
    the builtins; positional call sites are unaffected.
    """
    time.sleep(random.uniform(min_seconds, max_seconds))

# Flatten one level of nesting from a list of lists.
def expand_list(nested):
    """Return a flat list of the items of every sublist in *nested*,
    preserving order (flattens exactly one level).

    Parameter renamed from ``list`` so it no longer shadows the builtin.
    """
    return [item for sublist in nested for item in sublist]

if __name__ == "__main__":
    # Entry point: crawl the filter combinations and print the URLs.
    urls = get_less_3000_url_list()
    print(urls)