import codecs
import csv
import os
import time
from itertools import zip_longest
from time import sleep

import pandas as pd
import requests
from lxml import etree
# Accumulators for the scraped listing data, one list per output column.
# These are deliberately NOT behind an ``if __name__ == '__main__'`` guard:
# the scraping code below runs unconditionally at module level and appends
# to them, so guarding only the initialisation (as the original did) would
# raise NameError if this file were ever imported as a module.
new_house_titles = []     # listing titles
new_house_pic_srcs = []   # listing image URLs
new_house_lists = []      # listing detail-page URLs
new_house_prices = []     # monthly rent strings
new_house_areas = []      # floor-area / room description strings
new_house_positions = []  # location strings


headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.34'
}

# Ensure the image output directory exists. Hoisted out of the page loop:
# it only needs to be checked once, not once per page.
os.makedirs('./pic', exist_ok=True)

# Translation table that strips spaces, newlines and non-breaking spaces
# (\xa0) from the scraped text. Loop-invariant, so build it once here
# instead of once per listing as the original did.
trans = str.maketrans('', '', ' \n\xa0')

for k in range(1, 8):
    j = 0  # per-page counter of listings processed
    # The listing pages differ only in the trailing page number.
    url = 'https://bj.58.com/chuzu/pn' + str(k)
    # timeout added so a stalled connection cannot hang the scraper forever
    page_text = requests.get(url=url, headers=headers, timeout=30).text
    tree = etree.HTML(page_text)
    houses_lists = tree.xpath('//ul[@class = "house-list"]/li')

    for house_info in houses_lists:
        # Each xpath call returns a (possibly empty) list of strings
        # scoped to this one <li> listing element.
        # Listing title(s)
        house_titles = house_info.xpath('.//div[@class="des"]/h2/a/text()')
        # Detail-page link(s)
        house_lists = house_info.xpath('.//div[@class="des"]/h2/a/@href')
        # Image URL(s) (lazy-loaded attribute)
        house_pic_srcs = house_info.xpath('.//div[@class = "img-list"]//img/@lazy_src')
        # Rent price(s)
        house_prices = house_info.xpath('.//div[@class = "money"]/b/text()')
        # Floor area / room description(s)
        house_areas = house_info.xpath('.//p[@class = "room"]/text()')
        # Location string(s)
        house_positions = house_info.xpath('.//p[@class = "infor"]/a/text()')

        # Move the cleaned strings from the per-listing lists into the
        # module-level accumulators so each data kind ends up in one column.
        for i in range(len(house_titles)):
            j += 1
            new_house_areas.append(house_areas[i].translate(trans))
            new_house_titles.append(house_titles[i].translate(trans))
            new_house_prices.append(house_prices[i].translate(trans))
            new_house_positions.append(house_positions[i].translate(trans))
            new_house_lists.append(house_lists[i].translate(trans))
            new_house_pic_srcs.append(house_pic_srcs[i].translate(trans))

            # Download this listing's image. BUG FIX: the original indexed
            # house_pic_srcs[-1], which re-downloads the *last* image when
            # a listing exposes more than one URL; index i matches the
            # entry appended above.
            img_data = requests.get(url=house_pic_srcs[i], headers=headers,
                                    timeout=30).content
            # Name the file after the last '|'-separated part of the title
            # and save it under ./pic/.
            fileName = new_house_titles[-1].split('|')[-1]
            img_path = './pic/' + fileName
            with open(img_path + '.jpg', 'wb') as fp:
                fp.write(img_data)
            # Per-page progress counter.
            print(j)
    # Sleep 30 seconds after each page to ease off the anti-scraping checks.
    print('第' + str(k) + '页完成')
    print('休眠30秒')
    for p in range(30, 0, -1):
        print(p, ' ', end='')
        time.sleep(1)
# Write everything gathered above into house_info.csv. The six column
# lists may have different lengths (an xpath can match fewer nodes for
# some listings), so zip_longest pads the shorter columns with '' —
# exactly what the original's six copy-pasted ``if i < len(...)`` pad
# blocks did, but in one C-speed pass.
columns = [new_house_titles, new_house_areas, new_house_positions,
           new_house_prices, new_house_pic_srcs, new_house_lists]
with open('./house_info.csv', 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['房源', '房屋面积', '房屋地址', '房屋价格（元/月）', '房屋图片url', '房屋详情页url'])
    writer.writerows(zip_longest(*columns, fillvalue=''))




        #     #将数据存入csv文件中
# df = pd.DataFrame({'序号':num,'房源':new_house_titles,'房屋面积':new_house_areas,'房屋地址）':new_house_positions,'房屋价格（元/月）':new_house_prices,'房屋图片url':new_house_pic_srcs,'房屋详情页url':new_house_lists})
# df.to_csv('./house_info.csv','a',index=False,encoding='utf-8')


    # fieldnames = ['房源', '房屋面积', '房屋地址', '房屋价格（元/月）', '房屋图片url', '房屋详情页url']
    # rows = list(zip(new_house_titles,new_house_areas,new_house_positions,new_house_prices,new_house_pic_srcs))
    # with open("my_file.csv", "w",encoding='utf-8') as f:
    #
    #     writer = csv.DictWriter(f,fieldnames=fieldnames)
    #     # writer = csv.writer(f)
    #     writer.writeheader()
    #     for row in rows:
    #         writer.writerow({'房源':row[0], '房屋面积':row[1],'房屋地址':row[2],'房屋价格（元/月）':row[3],'房屋图片url':row[4],'房屋详情页url':row[5]})





    # 每爬取一个页面后休眠30秒，应对反爬机制

#






    #
    #
    #
    #
    #
    #
    #
    #
    #
    #
    #
    #
    #
    #     print(src_lists1,end = '')
    # #所有房源的详情页链接
    # src_lists = tree.xpath('/html/body/div[6]/div[2]/ul/li[9]/div[2]/h2/a/@href')
    # #房源的名字
    # home_titles = tree.xpath('/html/body/div[6]/div[2]/ul/li[11]/div[2]/h2/a/text()')
    # #房源图片的地址
    # home_pic_srcs = tree.xpath('/html/body/div[6]/div[2]/ul/li[11]/div[1]/a/img/@lazy_src')
    # new_home_titles =[]
    # for i in range(len(home_titles)):
    #     trans = str.maketrans("", "", " \n")
    #     home_titles.append(home_titles[i].translate(trans))
    #
    #
    # src_lists = tree.xpath('/html/body/div[6]/div[2]/ul/li[9]/div[2]/h2/a/@href')
    # #房源的名字
    # home_titles = tree.xpath('/html/body/div[6]/div[2]/ul/li[11]/div[2]/h2/a/text()')
    # #房源图片的地址
    # home_pic_srcs = tree.xpath('/html/body/div[6]/div[2]/ul/li[1]/div[1]/a/img/@lazy_src')
    # for i in range(len(src_lists)):
    #     # trans = str.maketrans("", "", " ")
    #     # home_new_titles = home_titles.translate(trans)
    #     # print(home_new_titles)
    #     print(home_titles)
    #     print(src_lists)
    #     print(home_pic_srcs)
    #
    #     print(a_titles,a_srcs)
    # name_lists = []
    # for a_src in a_srcs:
    #     name_srcs = 'https://bj.58.com'+a_src+'pn'
    #     #一个全是网址的列表
    #     name_lists.append(name_srcs)
    # with open('./物品和网址.csv','w',newline='',encoding='utf-8') as csvfile:
    #     writer = csv.writer(csvfile)
    #     # 写入列标题
    #     # writer.writerow(['物品名称', '网址'])
    #     # 这里我们使用zip函数将两个列表配对，然后写入CSV文件
    #     for row in zip(a_titles, name_lists):
    #         writer.writerow(row)
    #
    # with open('物品和网址.csv','r',encoding='utf-8') as csvfile:
    #     reader = csv.reader(csvfile)
    #     # row一个含有物品和网址的对应列表，只含有一组值。reader中含有多个小列表。
    #     for row in reader:
    #         # print(row[1])
    #         for j in range(1, 6):
    #             print(j)
    #             data_src = row[1] +str(j)+'/'
    #             print(data_src)
    #             print(row[0])
    #             page_text_data = requests.get(url=data_src, headers=headers).text
    #             tree = etree.HTML(page_text_data)
    #             data_text = tree.xpath('//div[@class = "t info"]/a/h1/text()')
    #             # print(data_text)
    #             sleep(2)
