# -*- coding=utf-8 -*-
import csv
import time

import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

# NOTE: the previous `global` statements here were removed — `global` is a
# no-op at module level (it only has meaning inside a function body).

# URL template for the listing pages; {} is filled with the page number.
BASE_URL = 'https://dg.lianjia.com/ershoufang/pg{}/'
# Number of pages to scrape.
DATA_PAGES = 100
# Delay between requests, in seconds (keep >= 0.5s to avoid IP bans).
SLEEP_SEC = 0.5


class Lianjia():
    """Scraper for Lianjia second-hand housing listings.

    Downloads listing pages, extracts title / house info / total price /
    unit price for each listing, and saves the records to ``dg.csv``.
    """

    def __init__(self, url, header):
        # url: page-URL template with a `{}` placeholder for the page number.
        # header: HTTP request headers — either a dict, or a raw multi-line
        #         "Name: value" block copied from browser dev tools (parsed
        #         into a dict here so `requests` can actually use it).
        self.url = url
        self.header = self._to_header_dict(header)

    @staticmethod
    def _to_header_dict(header):
        # Convert a copy-pasted "Name: value" header block into the dict
        # form that requests expects; dicts pass through unchanged.
        if isinstance(header, dict):
            return header
        headers = {}
        for line in str(header).splitlines():
            name, sep, value = line.strip().partition(':')
            if sep:
                headers[name.strip()] = value.strip()
        return headers

    def get_page(self):
        """Download DATA_PAGES listing pages and return their HTML texts."""
        html_list = []
        for page in range(1, DATA_PAGES + 1):
            url = self.url.format(page)
            print(url)
            # BUG FIX: headers were previously sent as `params=` (i.e. as
            # query-string parameters); send them as real HTTP headers.
            resp = requests.get(url, headers=self.header)
            html_list.append(resp.text)
            # Throttle requests; too-frequent hits get the IP blocked.
            time.sleep(SLEEP_SEC)
        return html_list

    def parse_page(self, htmlList):
        """Extract listing records from each page's HTML.

        Returns a list of dicts with keys
        title / houseInfo / totalPrice / unitPrice.
        """
        lst = []
        for html in htmlList:
            soup = BeautifulSoup(html, 'html.parser')
            ul = soup.find('ul', class_='sellListContent')
            if ul is None:
                # Likely an anti-scraping / captcha page with no listing
                # container — skip instead of crashing on `ul.find_all`.
                continue
            li_list = ul.find_all('li')
            print(len(li_list))
            print('*' * 60)
            for item in li_list:
                title = item.find('div', class_='title')
                house_info = item.find('div', class_='houseInfo')
                total_price = item.find('div', class_='totalPrice')
                unit_price = item.find('div', class_='unitPrice')
                # Skip ad/incomplete <li> entries lacking the expected divs
                # (previously an AttributeError on `.text`).
                if None in (title, house_info, total_price, unit_price):
                    continue
                lst.append({
                    'title': title.text,
                    'houseInfo': house_info.text,
                    'totalPrice': total_price.text,
                    'unitPrice': unit_price.text,
                })
        return lst

    def savaData(self, lst):
        """Write the parsed records to dg.csv.

        Name kept as-is ("sava") for backward compatibility with callers.
        The unused 'attention' column is kept so the CSV layout is unchanged.
        """
        head = ['title', 'houseInfo', 'totalPrice', 'unitPrice', 'attention']
        with open('dg.csv', 'w', encoding='utf-8', newline='') as f:
            writer = csv.DictWriter(f, head)
            writer.writeheader()
            writer.writerows(lst)

    def run(self):
        """Fetch all pages, parse them, and persist the records."""
        self.savaData(self.parse_page(self.get_page()))


def resolveData(filePath):
    """Load the scraped CSV and plot listing counts per total-price band.

    filePath: path to the CSV produced by Lianjia.savaData; its
    ``totalPrice`` column holds strings like ``'350万'`` (unit: 10k CNY).
    """
    # These two settings make matplotlib render Chinese labels correctly.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False

    data = pd.read_csv(filePath)
    # Strip the trailing '万' unit character and convert to float.
    # (Vectorized .str accessor replaces the old per-row lambda; the two
    # discarded data.head() calls were no-ops and are removed.)
    data['totalPrice'] = data['totalPrice'].str[:-1].astype(float)

    bins = [0, 200, 400, 600, 800, 1000, 1200, 1400]
    labels = ['0~200w', '200~400w', '400~600w', '600~800w',
              '800~1000w', '1000~1200w', '1200~1400w']
    band = pd.cut(data['totalPrice'], bins, labels=labels)
    # Count listings per band; reindex keeps the bands in ascending order
    # and fills empty bands with 0 (same result as the old counting loop).
    counts = band.value_counts().reindex(labels, fill_value=0)

    plt.figure(figsize=(8, 5), dpi=100)
    plt.bar(labels, counts.values, color='red')
    plt.title('各种价位数量对比')
    plt.xlabel('价位')
    plt.ylabel('数量')
    plt.show()


if __name__ == '__main__':
    start = time.time()
    base_url = BASE_URL
    # BUG FIX: headers used to be one raw multi-line string, which requests
    # cannot use as headers; build the real dict instead.
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Google Chrome";v="90"',
        'sec-ch-ua-mobile': '?0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-User': '?1',
        'Sec-Fetch-Dest': 'document',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
    }
    lj = Lianjia(base_url, headers)
    lj.run()
    resolveData('dg.csv')
    end = time.time()
    # BUG FIX: the old print passed the elapsed time as a second argument
    # (so '%d' was never substituted) and divided by 1000 even though
    # time.time() already returns seconds.
    print('耗费时间:%.1f 秒' % (end - start))
