import requests
from pyquery import PyQuery as pq
import threading
import logging
import time, random
import json

logger = logging.getLogger()
logging.basicConfig(level=logging.INFO, filename='log.txt')
# BUG FIX: the original called basicConfig() a second time with
# level=WARNING/filename='wanging.txt'. basicConfig is a no-op once the root
# logger already has handlers, so that second call silently did nothing and
# warnings only ever went to log.txt. Attach the WARNING-level file handler
# explicitly so warnings really land in their own file.
# NOTE(review): keeping the original 'wanging.txt' filename (likely a typo of
# 'warning.txt') so anything consuming that file keeps working — confirm.
_warn_handler = logging.FileHandler('wanging.txt')
_warn_handler.setLevel(logging.WARNING)
logger.addHandler(_warn_handler)


class GANJI(object):
    """
    Scraper for Ganji (赶集网) rental listings: collects detail-page URLs from
    the listing index pages and extracts each listing's fields to a JSON file.

    NOTE(review): the original author marked this abandoned because Ganji keeps
    demanding CAPTCHA verification, even through proxies.
    """
    def __init__(self, start_url):
        """
        :param start_url: listing index URL, e.g. 'http://gz.ganji.com/zufang/'
        """
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'}
        self.start_url = start_url
        self.request_s = requests.session()

        self.proxy_list = list()        # proxy pool; entries passed straight to requests' `proxies=`
        self.item_url_list = list()     # shared work queue of detail-page URLs
        self.item_url_list_lock = threading.Lock()  # guards check-and-pop on item_url_list
        self.save_info_lock = threading.Lock()      # serializes appends to the output file

    def get_item_page_info(self):
        """Worker: pop detail-page URLs and scrape one listing per iteration.

        Exits when the URL queue is empty.
        TODO(review): a worker can drain the queue faster than get_other_item()
        refills it and exit early; a queue.Queue with a sentinel would be the
        robust fix, but that is a larger redesign.
        """
        while True:
            # BUG FIX: check-and-pop must be atomic. The original tested len()
            # *before* taking the lock, so two workers could both observe one
            # remaining element and one of them crashed on pop() of an empty
            # list. The `with` form also releases the lock even on error.
            with self.item_url_list_lock:
                if not self.item_url_list:
                    print('no url ' + '*' * 50)
                    break
                url = self.item_url_list.pop()
                logging.info('get proxies url is [{}]'.format(url))

            try:
                response = self.request_s.get(url, headers=self.headers, timeout=10,
                                              proxies=random.choice(self.proxy_list))
            except requests.RequestException as err:
                # A dead proxy or timeout previously killed the whole worker
                # thread; log it and move on to the next URL instead.
                logging.warning('request failed for [{}]: {}'.format(url, err))
                continue

            items_doc = pq(response.content.decode())

            item_doc = items_doc('.card-top')  # div holding the main listing content
            item_dict = dict(
                title=item_doc('.card-title i').text(),  # listing title
                price=item_doc('.price-wrap span:first').text(),  # rent price
                pay_method=item_doc('.price-wrap span:last').text(),  # payment terms
                item_info_type=item_doc('.item.f-fl').eq(0)('.content').text(),  # room layout
                item_info_are_and_request=item_doc('.item.f-fl').eq(1)('.content').text(),  # area and requirements
                item_info_face=item_doc('.item.f-fl').eq(2)('.content').text(),  # orientation
                item_info_floor=item_doc('.item.f-fl').eq(3)('.content').text(),  # floor
                item_info_fitment=item_doc('.item.f-fl').eq(4)('.content').text(),  # furnishing state
                item_info_hous=item_doc('.er-list-two.f-clear .er-item .content').eq(0).text(),  # estate name
                item_info_metro=item_doc('.er-list-two.f-clear .er-item .content').eq(1).text(),  # metro access
                item_info_address=item_doc('.er-list-two.f-clear .er-item .content').eq(2).text(),  # address
            )
            if '' in item_dict.values():
                # Some selector came back empty — likely a CAPTCHA/verification page.
                logging.warning('[{}] is fail'.format(url))

            with self.save_info_lock:
                self.save_info(item_dict)

    def _collect_item_urls(self, listing_doc):
        """Extract every detail-page URL from a parsed listing page and queue it.

        Shared by get_lists_page_info() and get_other_item(), which previously
        duplicated this loop verbatim.
        """
        for item_div in listing_doc('.f-main-list .f-list-item.ershoufang-list').items():
            item_page_url = item_div('.dd-item.title a').attr('href')
            if not item_page_url:
                # attr() returns None when the anchor is missing; the original
                # would have crashed on startswith().
                continue
            logging.info('add url [{}]'.format(item_page_url))
            if item_page_url.startswith('//'):
                # e.g. '//gz.ganji.com/zufang/40097900959892x.shtml' — scheme-relative
                item_page_url = 'http:' + item_page_url
            self.item_url_list.append(item_page_url)

    def get_lists_page_info(self):
        """Fetch page 1, queue its URLs, then fan out producer + worker threads."""
        response = self.request_s.get(self.start_url, headers=self.headers,
                                      proxies=random.choice(self.proxy_list))
        doc = pq(response.content.decode())

        max_pages_number = doc('.f-page .pageBox .pageBox a').eq(-2).text()  # last page number
        self._collect_item_urls(doc)

        # One producer thread for pages 2..max plus 99 scraper workers.
        threads = [threading.Thread(target=self.get_other_item, args=(max_pages_number,))]
        for _ in range(99):
            threads.append(threading.Thread(target=self.get_item_page_info))

        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def get_other_item(self, max_pages_number):  # pages 2 and onward
        """Producer: fetch listing pages 2..max_pages_number and queue their URLs.

        :param max_pages_number: highest page number, as scraped text (int-able).
        """
        print(max_pages_number)
        for page in range(2, int(max_pages_number) + 1):
            try:
                html = self.request_s.get(self.start_url + 'pn{}/'.format(page),
                                          headers=self.headers,
                                          proxies=random.choice(self.proxy_list)).content.decode()
            except requests.RequestException as err:
                # Skip a failed listing page rather than killing the producer.
                logging.warning('listing page {} failed: {}'.format(page, err))
                continue
            self._collect_item_urls(pq(html))

    def save_info(self, item_dict):
        """Append one listing as pretty-printed JSON (plus trailing comma) to the output file."""
        with open('__gz_zufang_ganji_info.json', 'a+', encoding='utf8') as f:
            f.write(json.dumps(item_dict, ensure_ascii=False, indent=2) + ',' + '\n')

    def read_good_ip_file(self):
        """Load the pre-validated proxy pool from good_ip.json into self.proxy_list."""
        with open('good_ip.json', 'r', encoding='utf8') as f:
            self.proxy_list = json.load(fp=f)

    def main(self):
        """Entry point: load proxies, then crawl."""
        self.read_good_ip_file()   # load the usable proxies first
        self.get_lists_page_info()


if __name__ == '__main__':
    # Crawl the Guangzhou rental listing index.
    spider = GANJI(start_url='http://gz.ganji.com/zufang/')
    spider.main()
