'''
@ coding: utf-8
@ python: 3.9
@ author: allen
@ created: 2024/8/20
'''

# https://cq.lianjia.com/ershoufang/
# https://cq.lianjia.com/ershoufang/jiangbei/
# https://cq.lianjia.com/ershoufang/jiangbei/pg1/
# https://cq.lianjia.com/ershoufang/jiangbei/pg2/

import requests
import parsel
import re
import csv
import time

# District currently targeted by manual runs: Lianjia URL slug plus the
# Chinese name used in the output filename.
area = 'shapingba'
fileName = '沙坪坝'

# Chinese district names and their matching Lianjia URL slugs;
# area1[i] corresponds to area2[i].
area1 = [
    '大渡口', '江津', '北碚', '开州区', '巫山县',
    '巫溪县', '秀山土家族苗族自治县', '酉阳土家族苗族自治县',
    '荣昌区', '彭水苗族土家族自治县', '忠县', '奉节县',
    '垫江县', '城口县', '铜梁', '璧山',
    '合川', '长寿', '万州', '涪陵',
]
area2 = [
    'dadukou', 'jiangjin', 'beibei', 'kaizhouqu', 'wushanxian',
    'wuxixian', 'xiushantujiazumiaozuzizhixian', 'youyangtujiazumiaozuzizhixian',
    'rongchangqu', 'pengshuimiaozutujiazuzizhixian', 'zhongxian', 'fengjiexian',
    'dianjiangxian', 'chengkouxian', 'tongliang', 'bishan',
    'hechuan', 'changshou', 'wanzhou', 'fuling',
]



def test_proxy(ip):
    """Check whether a proxy address works by fetching a Lianjia listing page.

    Parameters:
        ip: proxy address string, e.g. "http://1.2.3.4:8080".

    Returns:
        The HTTP status code of the probe request (200 means usable),
        or 0 when the request times out or fails to connect.
    """
    headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'}
    url = "https://cq.lianjia.com/ershoufang/jiangbei/pg1/"
    # Map the proxy for BOTH schemes: the probe URL is https, so an
    # "http"-only mapping would silently bypass the proxy entirely and
    # this check would "pass" without ever exercising the proxy.
    proxies = {"http": ip, "https": ip}
    try:
        res = requests.get(url, headers=headers, proxies=proxies, timeout=3)
        print(res)
    except requests.exceptions.RequestException:
        # Catch the whole requests error hierarchy (timeout, refused
        # connection, bad proxy, ...). The original caught only Timeout,
        # so a dead proxy crashed the caller with an unhandled exception.
        print("请求超时")
        return 0
    # Return the request status so the caller can test for 200.
    return res.status_code

def getHouse(area, ip, start, end):
    """Scrape Lianjia second-hand listings for one district and write them to CSV.

    Parameters:
        area:  Lianjia district slug used in the URL, e.g. 'jiangbei'.
        ip:    proxy address to route the requests through.
        start: first page number (inclusive).
        end:   last page number (exclusive).

    Side effects:
        Writes ./链家数据/链家二手房_重庆_{area}.csv (overwritten each run)
        and prints progress to stdout.
    """
    fieldnames = [
        '链接',
        '标题',
        '小区',
        '位置',
        '单价',
        '总价',
        '类型',
        '面积',
        '方位',
        '装修',
        '楼层',
        '楼层数',
        '建立时间',
        '信息',
        '关注人数',
        '发布时间',
        '标签',
        '推荐标签'
    ]
    # Request headers are page-independent, so build them once up front.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'
    }
    # Route both schemes through the proxy (the target site is https;
    # an "http"-only mapping would bypass the proxy).
    proxies = {"http": ip, "https": ip}

    # 'with' guarantees the CSV handle is closed/flushed even when a request
    # raises mid-run (the original never closed the file).
    with open(f'./链家数据/链家二手房_重庆_{area}.csv', mode='w', encoding='utf-8', newline='') as f:
        csv_writer = csv.DictWriter(f, fieldnames=fieldnames)
        # The original never emitted the header row, producing a header-less CSV.
        csv_writer.writeheader()

        for page in range(start, end):
            print(f'正在获取第{page}数据...')
            url = f'https://cq.lianjia.com/ershoufang/{area}/pg{page}/'

            response = requests.get(url=url, headers=headers, proxies=proxies, timeout=3)
            selector = parsel.Selector(response.text)

            # One <li> per listing in the result list.
            divs = selector.css('.sellListContent li')
            print(divs)

            for div in divs:
                # Detail-page link and listing title.
                href = div.css('a::attr(href)').extract_first()
                title = div.css('.title a::text').get()

                # positionInfo holds [community name, location]; default to ''
                # so a listing without it neither crashes nor leaks the values
                # from the previous iteration (the originals were left unbound
                # / stale when the selector matched nothing).
                area_list = div.css('.positionInfo a::text').getall()
                area_pre = area_list[0] if len(area_list) > 0 else ''
                area_suff = area_list[1] if len(area_list) > 1 else ''

                # <li> items without a price are ads/placeholders — skip them.
                totalPrice = div.css('.totalPrice span::text').get()
                if totalPrice is None:
                    continue

                unitPrice = div.css('.unitPrice span::text').get().replace('元/平', '')

                # houseInfo is a ' | '-separated string such as
                # "3室2厅 | 89平米 | 南 | 精装 | 中楼层(共33层) | 2015年建 | 板楼".
                # Fetch it once (the original queried the selector twice).
                houseInfo = div.css('.houseInfo::text').get().split(' | ')
                houseType = houseInfo[0]
                houseArea = houseInfo[1].replace('平米', '')
                houseFace = houseInfo[2]
                houseInner = houseInfo[3]
                houseFloor = houseInfo[4][0]
                # Raw string for the regex; guard against a floor field with
                # no digits instead of crashing on [0].
                floor_digits = re.findall(r'\d+', houseInfo[4])
                houseFloor_num = floor_digits[0] if floor_digits else ''
                houseBuilding = houseInfo[-1]
                # The build year is only present when there are 7 fields.
                houseDate = houseInfo[5] if len(houseInfo) == 7 else '未知'

                # "N人关注 / 发布时间" — guard the split in case the '/' is absent.
                followInfo = div.css('.followInfo ::text').get().split('/')
                followNum = followInfo[0]
                followDate = followInfo[1] if len(followInfo) > 1 else ''

                # Listing tags and the "必看好房" recommendation badge.
                tags = div.css('.tag span::text').getall()
                goodhouse_tag = div.css('.title span::text').get()

                dit = {
                    '链接': href,
                    '标题': title,
                    '小区': area_pre,
                    '位置': area_suff,
                    '单价': unitPrice,
                    '总价': totalPrice,
                    '类型': houseType,
                    '面积': houseArea,
                    '方位': houseFace,
                    '装修': houseInner,
                    '楼层': houseFloor,
                    '楼层数': houseFloor_num,
                    '建立时间': houseDate,
                    '信息': houseBuilding,
                    '关注人数': followNum,
                    '发布时间': followDate,
                    '标签': tags,
                    '推荐标签': goodhouse_tag,
                }

                print(dit)
                csv_writer.writerow(dit)

def test():
    """Debug helper: fetch the first Jiangbei listing page directly
    (no proxy) and print the raw HTML."""
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'
    }
    page_url = 'https://cq.lianjia.com/ershoufang/jiangbei/pg1/'
    resp = requests.get(url=page_url, headers=request_headers)
    print(resp.text)

def readIP():
    """Read proxy IPs from 代理池.txt and scrape every district in area2.

    For each district, proxies are probed in file order; the first one that
    returns HTTP 200 is used to scrape pages 1-99 of that district.

    Bug fixed from the original: csv.reader over an open file is a one-shot
    iterator, so every district after the first iterated an exhausted reader
    and was silently skipped. The IP list is now materialized once up front
    (and the file is closed via 'with').
    """
    with open("代理池.txt") as f:
        # Skip blank rows, which would otherwise crash on row[0].
        ips = [row[0] for row in csv.reader(f) if row]

    for area in area2:
        for ip in ips:
            print(fr'当前使用的IP:{ip}')
            print(fr'当前爬取的地区:{area}')
            if test_proxy(ip) == 200:
                getHouse(area, ip, 1, 100)
                # One successful scrape per district is enough: repeating with
                # every working IP would just overwrite the same CSV (mode 'w').
                break


# Script entry point: read the proxy pool and scrape every district in area2.
if __name__ == '__main__':
    readIP()