from estate import estate
from settings import settings

import requests
import random
import re
import sys


class FangTianXia(estate.Estate):
    """Crawler for one area's real-estate listings on fang.com (房天下).

    Downloads listing pages through a rotating proxy pool, follows the
    site's JavaScript redirect stubs, and parses shop titles and
    per-square-metre prices out of the listing HTML.
    """

    # Substring that distinguishes a real listing page from an anti-bot
    # interstitial / redirect stub.
    _LISTING_MARKER = '<span class="tit_shop">'
    # Compiled once at class level (the original recompiled per parse()):
    # captures (image url, shop title, price) for each listing entry.
    _ITEM_RE = re.compile(
        r'<img class="loadimg".*?data-src="//(.*?)".*?'
        r'<span class="tit_shop">[\n\t]*(.*?)</span>'
        r'.*?<dd class="price_right">.*?<span>(\d+)元/㎡</span>', re.S)
    # Anti-bot stub pages carry the real target URL in a JS redirect.
    # Note the escaped dot: the original pattern left '.' unescaped.
    _REDIRECT_RE = re.compile(r'location\.href="(.*?)";')

    def __init__(self, area, proxy_pool):
        # area: human-readable area name, used only in log messages.
        # proxy_pool: object whose .get() returns a proxy URL or a falsy value.
        self.__area = area
        self.__proxy_pool = proxy_pool
        self.__headers = self.__get_headers(user_agent=True)
        self.__proxy = None
        # Fall back to 3 seconds when settings does not define TIMEOUT.
        self.__timeout = getattr(settings, "TIMEOUT", 3)

    @staticmethod
    def __get_headers(**kwargs):
        """Build request headers; pass user_agent=True for a random UA.

        Returns a fresh dict; never mutates settings.DEFAULT_HEADERS
        (the original updated that shared dict in place, leaking the
        randomly chosen User-Agent into global settings).
        """
        headers = {}
        if kwargs.get("user_agent"):
            headers["User-Agent"] = random.choice(settings.USER_AGENTS)
        if hasattr(settings, "DEFAULT_HEADERS"):
            merged = dict(settings.DEFAULT_HEADERS)
            merged.update(headers)
            headers = merged
        return headers

    def __set_proxy(self):
        """Fetch a fresh proxy from the pool; return True if one is set.

        Keeps (and reports success for) the previously obtained proxy when
        the pool is temporarily empty — same behavior as the original.
        """
        proxy = self.__proxy_pool.get()
        if proxy:
            self.__proxy = {"https": proxy}
        return self.__proxy is not None

    def download(self, page_url):
        """Download one listing page, following the site's JS redirect stub.

        Returns the page HTML, or "" when the page could not be fetched
        or is not a listing page.
        """
        page_source = ""
        try:
            params = {"headers": self.__headers,
                      "proxies": self.__proxy,
                      "timeout": self.__timeout}
            page_source = requests.get(page_url, **params).text
            if self._LISTING_MARKER not in page_source:
                # Not a listing page: either a JS redirect stub or junk.
                redirect = self._REDIRECT_RE.search(page_source)
                if redirect and redirect.group(1):
                    print("下载{}地区页面{}的过程中发生了重定向".format(self.__area, page_url))
                    page_source = requests.get(redirect.group(1), **params).text
                else:
                    # No redirect target: treat as a failed download. The
                    # original called .group(1) on a possible None here,
                    # raising AttributeError into the broad except below.
                    page_source = ""
        except Exception as e:
            # Boundary catch: proxied requests fail in many ways
            # (timeouts, connection resets, bad proxies); log and give up
            # on this page rather than crash the whole crawl.
            print("下载{}地区的数据出现了异常:{}".format(self.__area, e))
            page_source = ""
        return page_source

    def parse(self, html):
        """Extract [title, price_per_sqm] pairs from listing-page HTML.

        Returns [] when html is empty or is not a listing page.
        """
        results = []
        if self._LISTING_MARKER in html:
            items = self._ITEM_RE.findall(html)
            # item = (image url, title, price); the image url is unused.
            results = [[title, int(price)] for _img, title, price in items]
        return results

    def batch_crawling(self, page_url, pages=50):
        """Crawl pages 1..pages of page_url+<n>, rotating proxies on failure.

        Returns the accumulated [title, price] results. Stops early (with
        whatever was collected so far) when no working proxy can be
        obtained. Interface unchanged; the original's inner closure added
        nothing and was inlined.
        """
        all_results = []
        if not self.__set_proxy():
            return all_results
        page_no = 1
        while page_no <= pages:
            request_url = page_url + str(page_no)
            results = self.parse(self.download(request_url))
            if results:
                all_results.extend(results)
                # Log the page count; the original formatted
                # len(all_results) (item count) into the "pages" slot.
                print("地区{}的地产数据下载成功，已下载{}页".format(self.__area, page_no))
                page_no += 1
            else:
                print("地区{}的地产数据下载失败，页面url为{}, 重新获取代理".format(self.__area, request_url))
                if not self.__set_proxy():
                    print("获取代理失败，系统终止爬取，须检查代理服务器")
                    break
        return all_results
