import requests
from pyquery import PyQuery as pq
import threading
import logging
import time, random
import json, os

_path = os.path.dirname(__file__)

logger = logging.getLogger()
logging.basicConfig(level=logging.INFO, filename=os.path.join(_path, 'log/log.txt'))


class GANJI(object):
    """Scraper for ganji.com housing list pages.

    Originally meant to crawl every listing's detail page as well, but the
    site keeps demanding captcha verification (even through proxies), so
    only the data visible on the list pages is collected.
    """
    def __init__(self, start_url, proxy_ip_file, result_file):
        """
        :param start_url: list-page URL template with a '{}' placeholder
                          for the 1-based page number.
        :param proxy_ip_file: JSON file holding a list of requests-style
                              proxy dicts (see read_good_ip_file).
        :param result_file: output file; one JSON object is appended per
                            listing (see save_info).
        """
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'}
        self.start_url = start_url
        self.result_file = result_file
        self.proxy_ip_file = proxy_ip_file
        self.request_s = requests.session()

        self.proxy_list = list()        # pool of proxy dicts loaded from proxy_ip_file

    def _fetch_page(self, url):
        """GET *url* through a random proxy (or directly when the pool is
        empty) and return the parsed PyQuery document."""
        proxy = random.choice(self.proxy_list) if self.proxy_list else None
        response = self.request_s.get(url, headers=self.headers, proxies=proxy)
        return pq(response.content.decode())

    def get_lists_page_info(self, list_div):
        """Extract the visible fields from each listing node and persist them.

        :param list_div: iterable of PyQuery nodes, one per listing row.
        """
        for item_doc in list_div:

            item_dict = dict(
                title=item_doc('.dd-item.title a').text(),  # listing title
                item_info_type=item_doc('.dd-item.size span').eq(0).text(),  # room layout
                item_info_are=item_doc('.dd-item.size span').eq(2).text(),  # floor area
                item_info_face=item_doc('.dd-item.size span').eq(4).text(),  # orientation
                item_info_fitment=item_doc('.dd-item.size span').eq(6).text(),  # decoration state
                item_info_address1=item_doc('.dd-item.address').eq(0)('a').eq(0).text(),  # district
                item_info_address2=item_doc('.dd-item.address').eq(0)('a').eq(1).text(),  # sub-district
                item_info_publisher=item_doc('.dd-item.address').eq(1)('span.address-eara').text(),  # publisher
                item_info_feature=item_doc('.dd-item.feature span').text(),  # metro / feature info
                price=item_doc('.price span.num').text() + item_doc('.price span.yue').text(),  # price + unit
            )

            self.save_info(item_dict)

    def get_page(self):
        """Crawl page 1 to discover the total page count, then crawl the rest."""
        response = self._fetch_page(self.start_url.format(1))

        list_div = response('.f-main-list .f-list-item.ershoufang-list').items()
        self.get_lists_page_info(list_div=list_div)

        # The second-to-last pager link carries the highest page number; the
        # selector yields '' when there is only one page (no pager rendered),
        # in which case int('') used to crash the whole run.
        max_pages_number = response('.f-page .pageBox .pageBox a').eq(-2).text()
        last_page = int(max_pages_number) if max_pages_number else 1
        logging.info('total pages: %s', last_page)

        for i in range(2, last_page + 1):
            time.sleep(1)  # be polite: the site throttles aggressive crawlers
            url = self.start_url.format(i)
            logging.info('%s', url)
            response = self._fetch_page(url)

            list_div = response('.f-main-list .f-list-item.ershoufang-list').items()
            self.get_lists_page_info(list_div=list_div)

    def save_info(self, item_dict):
        """Append *item_dict* to the result file as pretty-printed JSON.

        NOTE: the file accumulates comma-separated objects, not a valid JSON
        array — downstream readers must wrap the content in '[...]'.
        """
        with open(self.result_file, 'a+', encoding='utf8') as f:
            f.write(json.dumps(item_dict, ensure_ascii=False, indent=2) + ',' + '\n')

    def read_good_ip_file(self):
        """Load the pool of verified proxies from proxy_ip_file.

        A missing or unparsable file leaves the pool empty, so requests fall
        back to direct connections instead of crashing before any scraping.
        """
        try:
            with open(self.proxy_ip_file, 'r', encoding='utf8') as f:
                self.proxy_list = json.load(fp=f)
        except (OSError, json.JSONDecodeError):
            logging.warning('could not load proxies from %s; continuing without', self.proxy_ip_file)
            self.proxy_list = []

    def main(self):
        self.read_good_ip_file()   # load the usable proxy pool first
        self.get_page()  # then crawl all list pages


if __name__ == '__main__':
    # Pick exactly one start URL; '{}' is filled with the page number.
    # start_url = 'http://gz.ganji.com/zufang/pn{}/'  # whole-flat rentals
    # start_url = 'http://gz.ganji.com/hezu/pn{}/'      # shared rentals
    start_url = 'http://gz.ganji.com/ershoufang/pn{}/'      # second-hand homes

    proxy_ip_file = os.path.join(_path, 'proxy_ip/good_ip.json')
    result_file = os.path.join(_path, 'result/gz_ershoufang_ganji_info.json')

    crawler = GANJI(start_url=start_url, proxy_ip_file=proxy_ip_file, result_file=result_file)
    crawler.main()
