import json
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from lxml import etree
import csv

url = 'https://beijing.anjuke.com/'

driver = webdriver.Edge()

# Open the site first: Selenium can only add cookies for the currently
# loaded domain, so we visit the page, inject the saved cookies, then
# re-load and refresh so the session applies (skipping the login).
driver.get(url)

# Read the previously saved cookies (one JSON object per line).
with open('cookies.json', 'r', encoding='utf-8') as file:
    cookies = file.read()

# BUGFIX: the original iterated range(0, len(cook_list) - 1), silently
# dropping the last line on the assumption that the file always ends with
# a blank line. Skipping empty lines handles both layouts safely.
for line in cookies.split("\n"):
    if not line.strip():
        continue
    dict_cookie = json.loads(line)
    # The stored expiry stamps are stale by now; push them one day ahead
    # so the browser does not discard the cookies as expired.
    dict_cookie['expiry'] = int(time.time() + 86400)
    # secure must be True so the cookie is sent over https://.
    dict_cookie['secure'] = True
    driver.add_cookie(dict_cookie)

# Re-load the page so the injected cookies take effect.
driver.get(url)

driver.refresh()
driver.maximize_window()

# Click the "rent" tab on the home page.
rent_house = driver.find_element(By.XPATH, '//*[@id="ajk-home"]/div[1]/div[6]/div[2]/div/div/ul/li[4]/a')
rent_house.click()
# The listing opens in a new tab; point the driver at the newest handle,
# otherwise it keeps operating on the original tab.
driver.switch_to.window(driver.window_handles[-1])
time.sleep(5)

#   定义循环爬取数据的函数:
def get_datas(number):
    """Scrape one result page (page *number*) of Anjuke Beijing rentals.

    Parses the page currently shown by the module-level ``driver`` with
    lxml, prints each extracted column for eyeballing, and appends one CSV
    row per listing to ``data01.csv``. Unless this is page 50, it then
    scrolls to the bottom (to look less bot-like) and clicks "next page".

    Each CSV row: running index, title, layout traits, price, landlord,
    community name, address parts, extra tags, detail-page URL.
    """
    page_size = 60  # nominal listings per page; used for the running index

    print("第" + str(number) + "页开始爬取")
    print("打印网页url:", driver.current_url)
    # Parse the rendered page source (relative XPaths were unreliable here,
    # so everything is located by id/class).
    html = etree.HTML(driver.page_source)

    max_div = html.xpath('//div[@id="list-content"]')
    for x in max_div:
        # --- listing titles ---
        title_list = x.xpath('//div[@class="zu-itemmod clearfix"]/div[1]/h3/a/b/text()')
        print(title_list)
        print("标题的数量:", len(title_list))

        # --- unit strings of the layout (室 / 厅 / ㎡ ...) ---
        raw_units = x.xpath(
            '//div[@class="zu-itemmod clearfix"]/div[1]//p[@class="details-item tag"]/text()')
        # BUGFIX: the original tested  '\n' in s.replace('\n', '')  — always
        # False because the newline was just removed — and del-eted from the
        # list while indexing it (IndexError if it ever fired).  Stripping
        # every entry and dropping empties is the intended cleanup.
        house_trait_end_list = [str(s).strip() for s in raw_units]
        house_trait_end_list = [s for s in house_trait_end_list if s != '']
        # Group every 4 consecutive unit strings (one listing each).  Slicing
        # also tolerates a trailing partial group instead of raising.
        new_house_trait_end_list = [house_trait_end_list[i:i + 4]
                                    for i in range(0, len(house_trait_end_list), 4)]

        # --- the numbers of the layout (e.g. rooms, halls, area) ---
        house_trait_number_list = x.xpath('//div[@class="zu-itemmod clearfix"]/div[1]/p//b[@class="strongbox"]/text()')
        # Group every 3 consecutive numbers (one listing each).
        new_house_trait_number_list = [house_trait_number_list[i:i + 3]
                                       for i in range(0, len(house_trait_number_list), 3)]

        # Join each number with its unit ("3" + "室" -> "3室") and keep the
        # listing's trailing 4th unit string as-is.
        finally_house_trait_list = []
        for nums, units in zip(new_house_trait_number_list, new_house_trait_end_list):
            traits_list = [str(a) + str(b) for a, b in zip(nums, units)]
            traits_list.append(units[-1])
            finally_house_trait_list.append(traits_list)
        print(finally_house_trait_list)
        print("房屋特点的数量:", len(finally_house_trait_list))

        # --- price per month/year: amount + unit, concatenated ---
        money_list = x.xpath('//div[@class="zu-side"]/strong[@class="price"]/text()')
        money_end_list = x.xpath('//div[@class="zu-side"]/span[@class="unit"]/text()')
        finally_money_list = [str(m) + str(u) for m, u in zip(money_list, money_end_list)]
        print(finally_money_list)
        print("价格的数量:", len(finally_money_list))

        # --- landlord / agent names ---
        landlord_list = x.xpath('//div[@class="zu-info"]//span[@class="jjr-info"]/text()')
        print(landlord_list)
        print("房东的数量:", len(landlord_list))

        # --- community (小区) names ---
        plot_list = x.xpath('//div[@class="zu-info"]/address[@class="details-item tag"]/a/text()')
        print(plot_list)
        print("小区的数量:", len(plot_list))

        # --- district / township / transit line, 3 parts per listing ---
        details_list = x.xpath('//div[@class="zu-info"]/address[@class="details-item tag"]/text()')
        # NOTE: the original also called .replace("\xa0\xa0\n ", ''), which
        # can never match once "\n" is already removed; strip() handles the
        # remaining \xa0 whitespace.
        details_list = [str(d).replace("\n", '').strip() for d in details_list]
        details_list = [d for d in details_list if d != '']
        finally_address_list = [details_list[i:i + 3]
                                for i in range(0, len(details_list), 3)]
        print(finally_address_list)
        print("详细地址的数量:", len(finally_address_list))

        # --- extra tags: variable count per listing, so probe item by item
        # until an empty result marks the end of the list ---
        other_trait_list = []
        xpath_start = 'div['
        xpath_end = ']/div[1]/p[2]//span[@class="cls-common"]/text()'
        for h in range(3, 64):
            data = x.xpath(xpath_start + str(h) + xpath_end)
            if not data:
                break
            other_trait_list.append(data)
        print(other_trait_list)
        print("其他特点的数量:", len(other_trait_list))

        # --- detail-page URLs, with tracking query strings removed ---
        url_list = x.xpath('//div[@class="zu-itemmod clearfix"]/a[@class="img"]/@href')
        url_list = [str(u).split('?')[0] for u in url_list]
        print(url_list)
        print("url连接的数量", len(url_list))

        # BUGFIX: the original always wrote exactly 60 rows (IndexError on a
        # short page), and its three numbering branches all computed the same
        # value, (number - 1) * 60 + c.  Write only as many rows as every
        # column actually has, with the single equivalent index formula.
        row_count = min(len(title_list), len(finally_house_trait_list),
                        len(finally_money_list), len(landlord_list),
                        len(plot_list), len(finally_address_list),
                        len(other_trait_list), len(url_list))
        rows = []
        for c in range(row_count):
            rows.append([
                (number - 1) * page_size + c, title_list[c], finally_house_trait_list[c],
                finally_money_list[c], landlord_list[c], plot_list[c],
                finally_address_list[c], other_trait_list[c], url_list[c]
            ])
        # Append this page's rows to the CSV.
        with open('data01.csv', 'a', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)
            writer.writerows(rows)

    if number != 50:
        # Scroll to the bottom and pause, to avoid anti-scraping triggers
        # from rapid-fire clicking.
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(5)
        # Click "next page" and wait for it to load.
        next_button = driver.find_element(By.XPATH, '/html/body/div[5]/div[3]/div[3]/div/a[@class="aNxt"]')
        next_button.click()
        time.sleep(5)

# Crawl result pages 1 through 50, printing a separator between pages.
for page in range(1, 51):
    get_datas(page)
    print("*" * 25)


# Keep the browser open until the user presses Enter in the console.
input("在控制台回车关闭网页")
driver.quit()
