# Housing-price crawler class (note: despite the "Ningbo rental" naming, the
# hard-coded URLs below scrape Anjuke's Beihai new-home listing pages)
import os
import re
import time
import bs4
import requests
import pandas as pd
import random


class nignbo_housing_price_crawler:
    """Crawler for new-home listing prices on Anjuke (bh.fang.anjuke.com).

    Workflow (kicked off entirely by __init__):
      1. Fetch a proxy from the proxy-pool API (``get_proxy``).
      2. Page through the listing index, collecting detail-page URLs
         (``get_pageurl``).
      3. Visit each detail page, scrape the fields, and append one row per
         development to the CSV at ``self.savePath``
         (``get_crawler_infomation`` / ``URL_request``).

    NOTE(review): the class name says Ningbo but the URLs target Beihai
    ("bh"); kept as-is so existing callers do not break.
    """

    def __init__(self):
        # Proxy-pool API: returns one plain-text "host:port" per call.
        self.proxy_url = 'http://http1.9vps.com/getip.asp?username=andy9527&pwd=2192c496183c0655a8174fdf2640a347&geshi=1&fenge=1&fengefu=&getnum=1'

        # A browser-like User-Agent is required or the site rejects requests.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
        }

        self.cook = {
            'Cookie': ''  # fill in from your own browser session
        }

        # Output CSV path; start from a clean file on every run.
        self.savePath = "123.csv"
        if os.path.exists(self.savePath):
            os.remove(self.savePath)

        self.request_count = 0
        self.URL_list = []
        self.proxy = self.get_proxy(self.headers)

        # Instantiating the object starts the whole crawl.
        self.get_pageurl()

    def get_proxy(self, headers):
        """Fetch one proxy address from the pool API.

        Returns a requests-style proxies dict: ``{'http': 'host:port'}``.
        """
        raw = requests.get(self.proxy_url, headers=headers).text
        proxy_host = raw.strip()
        proxy = {
            'http': proxy_host,
            # 'https' deliberately omitted — the pool serves HTTP proxies only.
        }
        print("proxy: ", proxy)
        return proxy

    def get_pageurl(self):
        """Walk the paginated listing index and collect detail-page URLs.

        Stops when either 4 consecutive page requests come back empty/blocked
        or 5 consecutive items yield no usable detail link, then de-duplicates
        ``self.URL_list`` and hands it to ``get_crawler_infomation``.
        """
        i = 0
        stop_signal = True
        miss_count = 0   # consecutive items without a valid detail link
        fail_count = 0   # consecutive empty/blocked page responses
        while stop_signal:
            # Rotate the proxy roughly every 4th request.
            if random.randint(1, 4) == 2:
                self.proxy = self.get_proxy(self.headers)
            i += 1
            url = f"https://bh.fang.anjuke.com/loupan/all/p{i}/"  # Anjuke, Beihai
            print(f"request {i} page")
            print("request url: ", url)

            resp = requests.get(url, cookies=self.cook, headers=self.headers, proxies=self.proxy)
            time.sleep(random.randint(10, 20))
            resp.encoding = resp.apparent_encoding
            soup = bs4.BeautifulSoup(resp.text, "lxml")
            zoom_info = soup.find_all(name="div", class_="item-mod")
            if not zoom_info:
                # BUG FIX: the failure counter used to be reset to 0 at the
                # top of every iteration, so the "stop after repeated
                # failures" branch was unreachable and a dead page was
                # retried forever.  Count consecutive failures instead.
                fail_count += 1
                if "验证码级别" in resp.text:
                    # Blocked by a captcha — ask the operator to solve it.
                    print("网页已启动验证码: 请手动访问解锁 :", url)
                    input("操作完成, 回车继续")
                    time.sleep(10)
                print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
                print("页面请求异常: ", resp.text)
                if fail_count <= 3:
                    # Retry the same page number.
                    if i > 0:
                        i -= 1
                    continue
                stop_signal = False
            else:
                fail_count = 0

            for item in zoom_info:
                # The first <a> inside each listing card is the detail link.
                zoom_link = item.a["href"]
                # Only keep links ending in "<digits>.html" (raw string +
                # escaped dot: the original non-raw ".+\d+.html$" also
                # matched e.g. "...1Xhtml" and warns on modern Python).
                if re.findall(r".+\d+\.html$", zoom_link):
                    print("get url: ", zoom_link)
                    self.URL_list.append(zoom_link)
                    miss_count = 0
                else:
                    miss_count += 1
                    if miss_count > 4:
                        stop_signal = False

        # De-duplicate before visiting detail pages.
        self.URL_list = list(set(self.URL_list))
        self.get_crawler_infomation(self.URL_list)

    def URL_request(self, url, mode=False):
        """Fetch one page through the current proxy and parse it.

        mode=True : *url* is a detail page — scrape the fields and append one
                    row to ``self.savePath``.  Returns the parsed
                    "basic-details" nodes on success, ``None`` on failure.
        mode=False: *url* is a listing page — returns the parsed "item-mod"
                    nodes (BUG FIX: previously returned ``None`` implicitly,
                    which made the caller's ``for item in zoom_info`` crash).
        """
        # Occasionally rotate the proxy (~1 call in 6).
        if random.randint(5, 10) == 7:
            self.proxy = self.get_proxy(self.headers)
        resp = requests.get(url, cookies=self.cook, headers=self.headers, proxies=self.proxy)
        time.sleep(random.randint(3, 7))
        resp.encoding = resp.apparent_encoding
        soup = bs4.BeautifulSoup(resp.text, "lxml")
        data_dict = {}
        if mode:
            print("楼盘详情页")
            zoom_info = soup.find_all(name="div", class_="basic-details")
            zoom_info2 = soup.find_all(name="div", class_="hx-list-mod")
            if not zoom_info:
                # Possibly blocked by a captcha — ask the operator to solve it.
                if "验证码级别" in resp.text:
                    print("网页已启动验证码: 请手动访问解锁 :", url)
                    input("操作完成, 回车继续")

                print("页面请求异常, 重新请求")
                print("requests Fails URL: ", url)
                print("重新请求: ", url)
                print("详情页请求结果: ", resp.text)
                resp.close()
                time.sleep(random.randint(1, 3))
                # Grab a fresh proxy and retry exactly once.
                self.proxy = self.get_proxy(self.headers)
                resp = requests.get(url, cookies=self.cook, headers=self.headers, proxies=self.proxy)
                time.sleep(random.randint(2, 6))
                resp.encoding = resp.apparent_encoding
                soup = bs4.BeautifulSoup(resp.text, "lxml")
                zoom_info = soup.find_all(name="div", class_="basic-details")
                zoom_info2 = soup.find_all(name="div", class_="hx-list-mod")
                if not zoom_info:
                    # Still blocked: rotate proxy and give up on this URL.
                    self.proxy = self.get_proxy(self.headers)
                    return None
            try:
                for data in zoom_info:
                    # Development name.
                    house_name = data.h1.text
                    print("楼盘名: ", house_name)
                    data_dict["楼盘名称"] = [house_name]
                    # Asking price = number (<em>) + unit (<span>).
                    selling_price = data.em.text + data.span.text
                    print("售价: ", selling_price)
                    data_dict["售价"] = [selling_price]
                    # Address.
                    zoom_add = data.find_all(name="a", class_="lpAddr-text g-overflow")[0].text
                    print("地址: ", zoom_add)
                    data_dict["地址"] = [zoom_add.replace(' ', '')]
                    # Layout types, joined with "/".
                    zoom_type = data.find_all(name="div", class_="house-item g-overflow")
                    house_type = "房型: "
                    for ty in zoom_type:
                        for tya in ty.find_all('a'):
                            house_type += (tya.text + "/")
                    print(house_type)
                    data_dict["房型"] = [house_type.replace("房型: ", "")]

                    # Opening / delivery dates: any <span> under a <dd> whose
                    # text contains both "年" and "月" is treated as a date.
                    open_time = "开盘/交付日期: "
                    for zoom_time in data.find_all(name="dd"):
                        for span in zoom_time.find_all(name="span"):
                            if "年" in span.text and "月" in span.text:
                                open_time += (span.text + " / ")
                    data_dict["开盘/交付日期"] = [open_time.replace("开盘/交付日期: ", "")]

                    # Per-unit info concatenated into one string field.
                    data3 = ""
                    if zoom_info2:
                        for data2 in zoom_info2:
                            for index, a in enumerate(data2.find_all("li")):
                                data3 += "楼盘户型名" + str(index + 1) + ":" + (((a.span.text.replace(' ', '')).replace('\n', '')).replace('...', ''))
                                data3 += "户型" + str(index + 1) + ":" + a.find_all("span", class_="desc-v")[0].text
                                # Sales status / price tags may be absent —
                                # use find() and a None check instead of the
                                # original triple-nested bare except ladders.
                                status_tag = a.find("i", class_="comm-stat on-sale")
                                data3 += "销售状态" + str(index + 1) + ":" + (status_tag.text if status_tag else "")
                                price_tag = a.find("span", class_="desc-total-1")
                                data3 += "该房型售价" + str(index + 1) + ":" + (price_tag.text if price_tag else "")
                                data3 += "面积大小" + str(index + 1) + ":" + a.find_all("span", class_="desc-k area-k")[0].text
                    data_dict["URL"] = [url + "\n"]
                    data_dict["楼盘户型"] = [data3]
            except Exception:
                # Unexpected page structure — skip this listing entirely.
                resp.close()
                return None

            try:
                if data_dict:
                    # One-row DataFrame appended to the CSV; header only on
                    # the first write (when the file does not exist yet).
                    df = pd.DataFrame(data_dict, columns=data_dict.keys())
                    print(">>>>>>>>>")
                    df.set_index(["楼盘名称"], inplace=True)
                    if os.path.exists(self.savePath):
                        df.to_csv(self.savePath, encoding="utf-8", index=True, header=0, mode='a')
                    else:
                        df.to_csv(self.savePath, encoding="utf-8", mode='a')
                    time.sleep(1)
                    resp.close()
                    return zoom_info
                else:
                    resp.close()
                    time.sleep(1)
                    return None
            except Exception:
                resp.close()
                time.sleep(1)
                return None

        else:
            zoom_info = soup.find_all(name="div", class_="item-mod")
            if not zoom_info:
                print("页面请求异常, 重新请求")
                resp.close()
                time.sleep(10)
                proxy = self.get_proxy(self.headers)
                print("requests Fails URL: ", url)
                print("重新请求: ", url)
                resp = requests.get(url, cookies=self.cook, headers=self.headers, proxies=proxy)
                time.sleep(10)
                if not resp:
                    return None
                # BUG FIX: the retried response was never re-parsed, so the
                # caller always saw the stale (empty) result set.
                resp.encoding = resp.apparent_encoding
                soup = bs4.BeautifulSoup(resp.text, "lxml")
                zoom_info = soup.find_all(name="div", class_="item-mod")
            resp.close()
            return zoom_info

    def get_crawler_infomation(self, url):
        """Dispatch on the type of *url*.

        str  — a listing page: collect its detail-page links and recurse
               with the de-duplicated list.
        list — detail pages: visit each one (in random order, to look less
               like a bot) and scrape it via ``URL_request(mode=True)``.
        """
        if isinstance(url, str):
            print("request URL: ", url)
            zoom_info = self.URL_request(url, mode=False)
            # Extract detail links; guard against a failed request (None).
            URLlist = []
            for item in zoom_info or []:
                zoom_link = item.a["href"]
                if re.findall(r".+\d+\.html$", zoom_link):
                    URLlist.append(zoom_link)
            print(URLlist)
            URLlist = list(set(URLlist))
            self.get_crawler_infomation(URLlist)

        # Detail-page pass.
        if isinstance(url, list):
            while url:
                URL = url.pop(random.randint(0, len(url) - 1))
                print("request URL: ", URL)
                self.URL_request(URL, mode=True)

if __name__ == '__main__':
    # Candidate start URLs kept for reference; only the Anjuke Beihai
    # listing URL hard-coded inside the class is actually used.
    # url = "https://dg.lianjia.com/zufang/rs/"  # Lianjia
    # url = "https://yulinshi.anjuke.com/"  # Anjuke, Yulin
    # url = "https://yulin.58.com/xinfang/?from=from_fc_xf_icon&PGTID=0d200001-0092-1469-e70a-90bd1b3dea58&ClickID=1"  # 58.com
    # url = "https://nn.fang.ke.com/loupan"  # Beike (ke.com)
    # url = u"https://bh.fang.anjuke.com/?from=navigation"

    # url = ["https://bh.fang.anjuke.com/loupan/469188.html?from=AF_RANK_1"]
    # nignbo_housing_price_crawler().get_crawler_infomation(url)
    # time.sleep(10)

    # Instantiating the crawler starts the whole crawl (see __init__).
    obj = nignbo_housing_price_crawler()

