import requests
from lxml import etree
from threading import Thread, Semaphore
import csv
import random
import time


class House_spider(Thread):
    """Worker thread that scrapes one results page of 58.com Jizhou
    second-hand-house listings and appends the rows to a CSV file.

    One instance per page; concurrency is throttled by the module-level
    semaphore ``sm`` (created in ``__main__``).
    """

    def __init__(self, page):
        """Prepare the target URL, a random proxy and the request headers.

        :param page: 1-based page number of the listings index to fetch.
        """
        super(House_spider, self).__init__()
        # Proxy pool. Entries must not contain whitespace: a leading space
        # made "http:// 171..." an invalid proxy URL that requests rejects.
        pro_list = [
            '171.35.168.65:9999',
            '175.43.32.39:9999',
            '123.169.120.154:9999',
            '49.86.177.103:9999',
        ]
        hostprot = random.choice(pro_list)
        self.proxy_ip = {
            'http': f"http://{hostprot}"
        }
        self.url = f'https://tj.58.com/jianxiantj/ershoufang/p{page}/'
        self.headers = {
            'cookie': 'f=n; commontopbar_new_city_info=18%7C%E5%A4%A9%E6%B4%A5%7Ctj; commontopbar_ipcity=tj%7C%E5%A4%A9%E6%B4%A5%7C0; userid360_xml=BD16E7E10D5D28F7D35894BE9DE6C19B; time_create=1617027306640; id58=c5/nfGA6U90yx971EFj1Ag==; sessid=AD33EEA8-C8A5-4C7F-87E0-A829A36CDB2D; aQQ_ajkguid=DE711EB1-ECCE-4017-963D-12BEA81B57B5; seo_source_type=0; 58tj_uuid=7b6184c8-0754-4689-90e9-74c6badda49a; new_uv=1; utm_source=sem-sales-baidu-pc; spm=115364543327.28118550838; new_session=0; als=0; 58_ctid=18; is_58_pc=1; commontopbar_new_city_info=17%7C%E5%A4%A9%E6%B4%A5%7Ctj; init_refer=; f=n; ctid=18',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36',
        }

    @staticmethod
    def _first(items, default=''):
        """Return the first item of an xpath result list, or ``default``
        when the node was absent — avoids IndexError on missing fields."""
        return items[0] if items else default

    def get_date(self):
        """Download the page HTML, rate-limited by the shared semaphore.

        :return: raw HTML text of the listings page.
        """
        # NOTE(review): method name keeps the original "get_date" spelling
        # (presumably meant "get_data") for interface compatibility.
        sm.acquire()
        try:
            # Bug fix: self.headers was built but never sent; the cookie and
            # user-agent are needed to look like a normal browser session.
            # A timeout keeps a dead proxy from blocking the thread forever.
            response = requests.get(url=self.url, headers=self.headers,
                                    proxies=self.proxy_ip, timeout=10)
            res = response.text
            time.sleep(2)  # polite delay between consecutive requests
        finally:
            # Always release, even when the request raises — otherwise a
            # failed fetch would permanently shrink the worker pool.
            sm.release()
        return res

    def parse_data(self, res):
        """Extract one dict of raw xpath results per listing.

        :param res: HTML text from :meth:`get_date`.
        :return: list of dicts; every value is a (possibly empty) list of
                 xpath text results, normalized later in :meth:`save_data`.
        """
        # Deserialize the HTML into an element tree.
        html = etree.HTML(res)
        houses = html.xpath('//*[@id="__layout"]/div/section/section[3]/section[1]/section[2]/div')
        house_list = []
        for house in houses:
            dic = {}
            dic['department'] = house.xpath('./a/div[2]/div[1]/section/div[2]/p[1]/text()')
            dic['detail'] = house.xpath('./a/div[2]/div[1]/div[1]/h3/text()')
            dic['area'] = house.xpath('./a/div[2]/div[1]/section/div[1]/p[2]/text()')
            dic['total_money'] = house.xpath('./a/div[2]/div[2]/p[1]/span[1]/text()')
            dic['average_money'] = house.xpath('./a/div[2]/div[2]/p[2]/text()')
            dic['create_time'] = house.xpath('./a/div[2]/div[1]/section/div[1]/p[5]/text()')
            dic['floor'] = house.xpath('./a/div[2]/div[1]/section/div[1]/p[4]/text()')
            dic['room'] = house.xpath('./a/div[2]/div[1]/section/div[1]/p/span/text()')
            dic['href'] = house.xpath('./a/@href')
            house_list.append(dic)
        return house_list

    def save_data(self, dic):
        """Append the parsed listings to the CSV file.

        :param dic: list of per-listing dicts from :meth:`parse_data`.
        """
        fields = ['房源小区', '描述', '面积大小', '总价', '平均', '楼层', '建造时间', '楼层布局', '房源链接']
        # newline='' is required by the csv module to avoid blank rows on
        # Windows; utf-8 BOM keeps Excel happy with the Chinese headers.
        with open('data/蓟州二手房.csv', 'a', encoding='utf-8_sig', newline='') as f:
            # Build a dict-based CSV writer.
            writer = csv.DictWriter(f, fieldnames=fields)
            # Bug fix: write the header only once, when the file is still
            # empty — previously every thread re-wrote it on each append.
            if f.tell() == 0:
                writer.writeheader()
            for house in dic:
                # The room layout may be split across several <span> nodes;
                # join them into one string.
                room = ''.join(house['room'])
                # Second-pass cleanup: 建造时间 is sometimes missing from a
                # listing, and any other field may be absent too.
                data = {'房源小区': self._first(house['department']),
                        '描述': self._first(house['detail']),
                        '面积大小': self._first(house['area']),
                        '总价': self._first(house['total_money']),
                        '平均': self._first(house['average_money']),
                        '楼层': self._first(house['floor']),
                        '建造时间': self._first(house['create_time']),
                        '楼层布局': room,
                        '房源链接': self._first(house['href'])}
                writer.writerow(data)
            print('保存成功!!')

    def run(self):
        """Thread entry point: fetch, parse, then persist one page."""
        res = self.get_date()
        data = self.parse_data(res)
        self.save_data(data)


if __name__ == '__main__':
    # Shared semaphore: at most four pages are downloaded concurrently.
    # The name `sm` is read as a global by House_spider.get_date.
    sm = Semaphore(4)
    total_pages = int(input('输入您要爬取的页数>>>'))
    # One worker thread per page, pages numbered from 1.
    spiders = [House_spider(n) for n in range(1, total_pages + 1)]
    for spider in spiders:
        spider.start()
    # Block until every page has been fetched and saved.
    for spider in spiders:
        spider.join()
