# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
from scrapy.downloadermiddlewares.retry import RetryMiddleware, response_status_message

import re
import json
import time
import random
import logging
import requests
import telnetlib
import urllib.parse
from lxml import etree
from urllib.parse import unquote
from FDC_spider.settings import USER_AGENTS, PROXY, PROXY_URL
from FDC_spider.utils.proxy_to_redis_yueyang import PROXY_YY_KEY

logger = logging.getLogger(__name__)


class FdcSpiderSpiderMiddleware(object):
    """Default Scrapy spider middleware: passes everything through unchanged.

    Scrapy treats undefined hooks as no-ops, so every hook here simply
    forwards its input; only spider_opened does anything (logging).
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Wire up the spider_opened signal so start-up is logged.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Called for each response entering the spider; None means continue."""
        return None

    def process_spider_output(self, response, result, spider):
        """Forward every request/item produced by the spider untouched."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """No special handling; let Scrapy's default exception flow apply."""
        return None

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests untouched."""
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class FdcSpiderDownloaderMiddleware(object):
    """Default Scrapy downloader middleware: a pure pass-through.

    Each hook returns the value that tells Scrapy to continue normal
    processing; only spider_opened does anything (logging).
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Wire up the spider_opened signal so start-up is logged.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        """Returning None lets the request continue down the chain."""
        return None

    def process_response(self, request, response, spider):
        """Return the downloaded response unmodified."""
        return response

    def process_exception(self, request, exception, spider):
        """Returning None lets other middlewares handle the exception."""
        return None

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class UserAgentMiddleware(object):
    """Rotate the User-Agent header: pick a random agent for each request."""

    def process_request(self, request, spider):
        # USER_AGENTS is the pool configured in FDC_spider.settings.
        request.headers['User-Agent'] = random.choice(USER_AGENTS)


class InnerNetProxyMiddleware(object):
    """Assign a random proxy from the static PROXY pool, skipping dead ones.

    Each candidate is probed with a short Telnet connect; unreachable
    proxies are skipped and a new candidate is drawn until one answers.
    """

    def process_request(self, request, spider):
        while True:
            candidate = random.choice(PROXY)
            host = candidate.split(':')[0]
            port = candidate.split(':')[1]
            try:
                # Cheap liveness probe — just open a TCP connection.
                telnetlib.Telnet(host, port, timeout=3)
            except Exception as exc:
                print(candidate, exc)
            else:
                if request.url.startswith("http://"):
                    request.meta['proxy'] = 'http://' + candidate
                else:
                    request.meta['proxy'] = 'https://' + candidate
                return


class OuterNetProxyMiddleware_Old(object):
    """Fetch a fresh proxy IP from PROXY_URL and attach it to the request.

    The candidate is validated by fetching http://icanhazip.com through it
    and checking the echoed exit IP matches; on any failure another IP is
    requested. Kept for reference; superseded by OuterNetProxyMiddleware.

    Fixes vs the original: the proxy-source request now carries a timeout
    (it could previously hang forever), failures are logged instead of
    silently swallowed, a short sleep prevents a tight busy-loop from
    hammering the proxy service, and the IP check no longer relies on
    ``assert`` (which is stripped under ``python -O``).
    """

    def process_request(self, request, spider):
        whitespace_re = re.compile(r'\s')
        while True:
            try:
                # Timeout so a stuck proxy source cannot hang the crawl.
                resp = requests.get(PROXY_URL, timeout=10)
                proxy_ip = resp.content.decode()
                proxy_dict = {
                    "http": "http://hifo:hifo1996@{}:65500".format(proxy_ip),
                    "https": "https://hifo:hifo1996@{}:65500".format(proxy_ip)
                }
                # Sanity check: the exit IP seen by icanhazip must equal proxy_ip.
                check_resp = requests.get('http://icanhazip.com', proxies=proxy_dict, timeout=0.8)
                resp_ip = whitespace_re.sub('', check_resp.content.decode())
                if proxy_ip != resp_ip:
                    raise ValueError('proxy check failed: {} != {}'.format(proxy_ip, resp_ip))
            except Exception as e:
                # Log and back off briefly so a failing source is not hammered.
                logger.warning('proxy fetch/validation failed, retrying: %s', e)
                time.sleep(1)
                continue
            else:
                request.meta['proxy'] = proxy_dict['http'] if request.url.startswith("http://") else proxy_dict['https']
                break


class OuterNetProxyMiddleware(object):
    """Pop a proxy IP from the shared Redis 'proxy' list and attach it.

    Blocks up to 30 minutes waiting for an IP. NOTE(review): assumes the
    Redis client decodes responses to str and that brpop never times out
    (it returns None on timeout, which would fail the unpack) — confirm.
    """

    def process_request(self, request, spider):
        _, ip = spider.redis_conn.brpop('proxy', timeout=30 * 60)
        proxies = {
            "http": "http://hifo:hifo1996@{}:65500".format(ip),
            "https": "https://hifo:hifo1996@{}:65500".format(ip)
        }
        if request.url.startswith("http://"):
            request.meta['proxy'] = proxies['http']
        else:
            request.meta['proxy'] = proxies['https']


class NanjingProxyMiddleware(object):
    """Route Nanjing requests through a Redis-supplied proxy.

    Image-generation URLs ('imgmake') are fetched directly (no proxy set);
    every other request gets the popped proxy IP. Note the IP is popped
    from Redis even for 'imgmake' URLs, preserving consumption order.
    """

    def process_request(self, request, spider):
        _, ip = spider.redis_conn.brpop('proxy', timeout=24 * 60 * 60)
        proxies = {
            "http": "http://hifo:hifo1996@{}:65500".format(ip),
            "https": "https://hifo:hifo1996@{}:65500".format(ip)
        }
        if 'imgmake' in request.url:
            return
        if request.url.startswith("http://"):
            request.meta['proxy'] = proxies['http']
        else:
            request.meta['proxy'] = proxies['https']


class SfpmProxyMiddleware(object):
    """Attach a Redis-supplied proxy to every request of the sfpm spider.

    Requests from other spiders pass through untouched.
    """

    def process_request(self, request, spider):
        if spider.name != 'sfpm':
            return
        _, ip = spider.redis_conn.brpop('proxy', timeout=24 * 60 * 60)
        proxies = {
            "http": "http://hifo:hifo1996@{}:65500".format(ip),
            "https": "https://hifo:hifo1996@{}:65500".format(ip)
        }
        if request.url.startswith("http://"):
            request.meta['proxy'] = proxies['http']
        else:
            request.meta['proxy'] = proxies['https']


class SfpmCookiesMiddleware(object):
    """Attach a hard-coded Taobao session cookie to sfpm deal-confirm requests.

    Fix: cookie pairs are now split on the FIRST '=' only. The previous
    code split on every '=', which silently truncated values containing
    '=' (e.g. 'uc1=cookie14=UoTV5YqYlNWD4w%3D%3D' lost everything after
    the second '='). NOTE(review): the cookie is a captured session and
    will expire — consider sourcing it from settings.
    """

    def process_request(self, request, spider):
        if spider.name == 'sfpm':
            if 'getSfDealConfirm' in request.url:
                cookie_str = 'thw=cn; hng=CN%7Czh-CN%7CCNY%7C156; sgcookie=Eb%2FRv87wf6IsBIeHA09Uw; enc=HDbcdwpFGpXCTo0uorx%2B7nzNDKigBghEZCSP3tDRXQT%2B7Py6ELbolU963oQb8g4UvwV6U6AuftDQ3fRMF27avQ%3D%3D; tracknick=; sf-hide=1; miid=487354291836338112; UM_distinctid=172c603d09f272-02145e5c036478-3c3f5a0c-1fa400-172c603d0a0767; cna=25eVF1QTUU4CAbfmLefqOCS7; _m_h5_tk=08c7047e3f7bd82434e3ec589457c653_1599537839512; _m_h5_tk_enc=b58ff328fa553a174e4a4db9387d70fd; t=87258c33ad25c8ca1bdf18c0214b5c35; xlly_s=1; v=0; cookie2=117817b7d9dcde9ce6e1e777f2f60bdd; _tb_token_=e337e77d4b836; uc1=cookie14=UoTV5YqYlNWD4w%3D%3D; tfstk=cEJfByG_ar4bvRGa3niP717SjDWVamrCfj_DcIiZ1RM-Vmtfysx7LwNiEC4Te6I5.; l=eBaLIzEnqHT1CZNXBO5Cnurza77TmIObzsPzaNbMiInca63RZFZbANQ4BQnHodtjgtfvxetzaMr08RhMSQUd0HMLB_NQ3t4xZxJw-; isg=BKqqC4MMgT3MrQ9luR0Q2Svy-xBMGy51G3fyRDRiWv2BZ0ghHKgKhdLR95P7l6YN'
                # maxsplit=1: cookie values may legally contain '='.
                cookie_dict = {i.split("=", 1)[0]: i.split("=", 1)[1] for i in cookie_str.split("; ")}
                request.cookies = cookie_dict


class CustomRetryMiddleware(RetryMiddleware):
    """Retry middleware extended with per-spider response validation.

    Beyond the stock HTTP-status retries, process_response inspects the
    response body for several spiders and forces a retry when the page is
    recognisably broken (empty result tables, error markers, bad JSON
    status codes), logging each retry attempt.
    """

    def process_response(self, request, response, spider):
        """Return the response, or re-schedule the request via self._retry
        when the spider-specific sanity checks below fail."""
        # Honour Scrapy's standard per-request opt-out flag.
        if request.meta.get('dont_retry', False):
            return response
        # 2021/01/18 by ZSQ: the Zhongshan site changed its pagination
        # mechanism; page state is time-sensitive and needs handling here.
        if spider.name == 'zhongshan':
            # NOTE(review): flag is reset to True on EVERY zhongshan response,
            # so the 'and spider.flag' guard below always passes — confirm
            # whether the reset was meant to happen elsewhere.
            spider.flag = True
            if request.method == 'GET' and request.url == 'http://113.106.13.237:82/' and spider.flag:
                # Keep a copy of the entry-page GET so failed POSTs can
                # restart pagination from it.
                spider.get_request_obj = request.copy()
                spider.flag = False
            if request.method == 'POST':
                retries = request.meta.get('retry_times', 0) + 1
                if retries >= 8:
                    # Too many failures for this page: drop its page number
                    # from the pending set and retry the saved entry-page GET
                    # instead of the stale POST.
                    regex = re.compile(r'PageNavigator1%24txtNewPageIndex=(\d+)')
                    page_num = int(regex.findall(request.body.decode())[0])
                    spider.page_num_set.remove(page_num)
                    request = spider.get_request_obj
                    reason = response_status_message(response.status)
                    return self._retry(request, reason, spider) or response
        # Stock RetryMiddleware behaviour: retry on configured status codes.
        if response.status in self.retry_http_codes:
            reason = response_status_message(response.status)
            return self._retry(request, reason, spider) or response
        if spider.name == 'beijing':
            if request.method == 'POST':
                if request.url == 'http://zjw.beijing.gov.cn/eportal/ui?pageId=307670':
                    # Project-list POST: an empty result table means failure.
                    tr_li = response.xpath("//th[text()='项目名称']/../following-sibling::tr")
                    if len(tr_li) < 1:
                        # Recover page number / estate type from the form body
                        # for the log message.
                        formdata_str = unquote(request.body.decode())
                        data = {fd_str.split('=')[0]: fd_str.split('=')[1] for fd_str in formdata_str.split('&')}
                        current_page_num = data['currentPage']
                        estate_type = data['rblFWType']
                        retries = request.meta.get('retry_times', 0) + 1
                        logger.error(
                            '{}:{}房第{}页楼盘列表信息获取失败，尝试重试，第{}次重试：'.format(spider.name, estate_type, current_page_num,
                                                                       retries))
                        reason = response_status_message(response.status)
                        return self._retry(request, reason, spider) or response
                # 'rowcount' in the query string marks a building-list request.
                url_dict = urllib.parse.parse_qs(request.url.split('?')[1])
                row_count = url_dict.get('rowcount', None)
                if row_count:
                    tr_li = response.xpath("//th[text()='楼盘表']/../following-sibling::tr")
                    if len(tr_li) < 1:
                        retries = request.meta.get('retry_times', 0) + 1
                        logger.error(
                            '{}:{}楼栋列表信息获取失败，尝试重试，第{}次重试：'.format(spider.name, request.url, retries))
                        reason = response_status_message(response.status)
                        return self._retry(request, reason, spider) or response

            if request.method == 'GET' and request.url != 'http://zjw.beijing.gov.cn/bjjs/fwgl/fdcjy/index.shtml':
                # Classify the GET by its query-string parameters.
                url_dict = urllib.parse.parse_qs(request.url.split('?')[1])
                project_id = url_dict.get('projectID', None)
                row_count = url_dict.get('rowcount', None)
                building_id = url_dict.get('buildingId', None)
                if project_id:
                    # Project detail page: retry when both name and address
                    # are missing from the page.
                    project_name = response.xpath("//td[@id='项目名称']/text()").extract_first()
                    project_name = project_name.strip() if project_name else None
                    estate_address = response.xpath("//td[@id='坐落位置']/text()").extract_first()
                    estate_address = estate_address.strip() if estate_address else None
                    if not project_name and not estate_address:
                        retries = request.meta.get('retry_times', 0) + 1
                        logger.error(
                            '{}:{}楼盘信息获取失败,尝试重试，第{}次重试：'.format(spider.name, request.url, retries))
                        reason = response_status_message(response.status)
                        return self._retry(request, reason, spider) or response
                if row_count:
                    # Building-list page: empty table means failure.
                    tr_li = response.xpath("//th[text()='楼盘表']/../following-sibling::tr")
                    if len(tr_li) < 1:
                        retries = request.meta.get('retry_times', 0) + 1
                        logger.error(
                            '{}:{}楼栋列表信息获取失败，尝试重试，第{}次重试：'.format(spider.name, request.url, retries))
                        reason = response_status_message(response.status)
                        return self._retry(request, reason, spider) or response
                if building_id:
                    # Building (house-list) page: retry when the total-area
                    # field cannot be found.
                    building_total_area = response.xpath(
                        "//font[contains(text(),'地上')]/../font[1]/text()").extract_first()
                    if not building_total_area:
                        retries = request.meta.get('retry_times', 0) + 1
                        logger.error(
                            '{}:{}楼栋信息(房屋列表)获取失败，尝试重试，第{}次重试：'.format(spider.name, request.url, retries))
                        reason = response_status_message(response.status)
                        return self._retry(request, reason, spider) or response
        if spider.name == 'nanning':
            # JSON API: retry when the status code or message signals failure.
            resp_dict = json.loads(response.body.decode())
            if resp_dict['code'] != '10000' or resp_dict['msg'] == '网络繁忙':
                retries = request.meta.get('retry_times', 0) + 1
                logger.error(
                    '{}: {}  页面信息获取失败，尝试重试，第{}次重试，error：{}：'.format(spider.name, request.url,
                                                                    retries, dict(status_code=resp_dict['code'],
                                                                                  msg=resp_dict['msg'])))
                reason = response_status_message(response.status)
                return self._retry(request, reason, spider) or response
        if spider.name == 'nanjing' and 'imgmake' in request.url:
            # Image responses containing the 'illegal access' marker are
            # rejections, not real images — retry them.
            retries = request.meta.get('retry_times', 0) + 1
            try:
                ret = response.body.decode()
                assert '非法访问' in ret
            except:
                # Body is binary (a real image) or lacks the marker: accept.
                pass
            else:
                logger.error(
                    '{}:{}  图片获取失败，尝试重试，第{}次重试：'.format(spider.name, request.url, retries))
                reason = response_status_message(response.status)
                return self._retry(request, reason, spider) or response
        if spider.name == 'xuzhou':
            # JSON API: retry when the payload carries no usable 'obj' data.
            try:
                resp = json.loads(response.body.decode())
                if isinstance(resp, dict):
                    obj = resp.get('obj', None)
                else:
                    obj = resp
                assert obj
            except:
                retries = request.meta.get('retry_times', 0) + 1
                logger.warning(
                    '{}:{}  {}  获取数据失败，尝试重试，第{}次重试'.format(spider.name, request.url, request.body.decode(),
                                                           retries))
                reason = response_status_message(response.status)
                return self._retry(request, reason, spider) or response
        if 'hid' in request.url and spider.name == 'ganzhou':
            # JSON API: retry unless Succeed is truthy and Data is non-empty.
            try:
                resp_dict = json.loads(response.body.decode(errors='ignore'))
                succeed = resp_dict['Succeed']
                data_li = resp_dict['Data']
                assert succeed and len(data_li)
            except:
                retries = request.meta.get('retry_times', 0) + 1
                logger.warning(
                    '{}:{}  {}  获取数据失败，尝试重试，第{}次重试'.format(spider.name, request.url, request.body.decode(),
                                                           retries))
                reason = response_status_message(response.status)
                return self._retry(request, reason, spider) or response
        if spider.name == 'wuhan' and 'ShowPrice' in request.url:
            # Retry pages whose heading contains the error marker '异常'.
            try:
                flag = response.xpath("//h1[contains(text(),'异常')]/text()").extract_first()
                assert not flag
            except:
                retries = request.meta.get('retry_times', 0) + 1
                logger.warning(
                    '{}:{}  {}   获取房号数据失败，尝试重试，第{}次重试'.format(spider.name, request.url, request.body.decode(),
                                                              retries))
                reason = response_status_message(response.status)
                # NOTE(review): the retried request is redirected to the
                # chktest2.aspx endpoint, keeping only the first query value.
                # Mutating request._url directly bypasses Request.replace().
                request._url = 'http://119.97.201.22:8080/chktest2.aspx?gid=' + \
                               request._url.split('=', maxsplit=1)[1].split('&')[0]
                return self._retry(request, reason, spider) or response
        # 2020/08/25 by ZSQ: the Shijiazhuang site was redesigned; house-detail
        # pages sometimes return error responses.
        if spider.name == 'shijiazhuang' and 'showhouse' in request.url:
            try:
                resp_str = response.body.decode()
                assert '服务器忙' not in resp_str
            except:
                retries = request.meta.get('retry_times', 0) + 1
                logger.warning(
                    '{}:{}  {}  获取数据失败，尝试重试，第{}次重试'.format(spider.name, request.url, request.body.decode(),
                                                           retries))
                reason = response_status_message(response.status)
                return self._retry(request, reason, spider) or response
        # 2020/09/24 by ZSQ: the judicial-auction site upgraded anti-crawling;
        # province/city/district pages demand login verification when the same
        # IP is reused, so such responses are retried (with a fresh proxy).
        if spider.name == 'sfpm' and 'item_list' in request.url:
            try:
                resp_str = str(response.body, encoding='gbk', errors='ignore')
                assert 'x5referer' not in resp_str
            except:
                retries = request.meta.get('retry_times', 0) + 1
                logger.warning(
                    '{}:{}  司法拍卖 省市区 数据获取失败，需登录验证，更换代理ip尝试重试，第{}次重试'.format(spider.name, request.url, retries))
                reason = response_status_message(response.status)
                return self._retry(request, reason, spider) or response
        return response

    def process_exception(self, request, exception, spider):
        """Retry on network-level exceptions, mirroring stock RetryMiddleware.

        NOTE(review): EXCEPTIONS_TO_RETRY was renamed in newer Scrapy
        releases — confirm the pinned Scrapy version still exposes it.
        """
        if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \
                and not request.meta.get('dont_retry', False):
            return self._retry(request, exception, spider)


class GanzhouProxyMiddleware(object):
    """Proxy requests for the Ganzhou spider and refresh the 'hid' token.

    Every request gets a Redis-supplied proxy. Requests whose URL carries
    a 'hid' query parameter additionally need a fresh token, which is
    scraped from the site's lpbcx.aspx page (through the same proxy)
    before the request URL is rewritten in place.
    """

    def process_request(self, request, spider):
        _, ip = spider.redis_conn.brpop('proxy', timeout=30 * 60)
        proxies = {
            "http": "http://hifo:hifo1996@{}:65500".format(ip),
            "https": "https://hifo:hifo1996@{}:65500".format(ip)
        }
        if request.url.startswith("http://"):
            request.meta['proxy'] = proxies['http']
        else:
            request.meta['proxy'] = proxies['https']
        if 'hid' not in request.url:
            return
        # Fetch the page that embeds the current h_id token, via the proxy.
        token_page = 'http://218.64.195.220:333/InternalWebForms/Item/lpbcx.aspx'
        headers = {'User-Agent': random.choice(USER_AGENTS)}
        resp = requests.get(url=token_page, headers=headers,
                            proxies={"http": "http://hifo:hifo1996@{}:65500".format(ip), }, timeout=20)
        tree = etree.HTML(resp.content.decode())
        fresh_hid = tree.xpath("//input[@id='h_id']/@value")[0]
        request._url = self.remake_url(request._url, fresh_hid)

    @staticmethod
    def remake_url(_url, _hid):
        """Replace everything after 'hid=' in *_url* with *_hid*."""
        return re.sub(r'hid=(.*)', 'hid=' + _hid, _url)


class DongguanOuterNetProxyMiddleware(object):
    """Proxy Dongguan requests via Redis-supplied IPs.

    The project-list entry page ('ProjectInfo.aspx?new=1') is fetched
    directly; every other dongguan request gets a proxy popped from Redis.
    """

    def process_request(self, request, spider):
        if spider.name != 'dongguan' or 'ProjectInfo.aspx?new=1' in request.url:
            return
        _, ip = spider.redis_conn.brpop('proxy', timeout=30 * 60)
        proxies = {
            "http": "http://hifo:hifo1996@{}:65500".format(ip),
            "https": "https://hifo:hifo1996@{}:65500".format(ip)
        }
        if request.url.startswith("http://"):
            request.meta['proxy'] = proxies['http']
        else:
            request.meta['proxy'] = proxies['https']


class DongguanInnerNetProxyMiddleware(object):
    """Assign a live proxy from the static PROXY pool to dongguan requests.

    The entry page ('ProjectInfo.aspx?new=1') is excluded. Candidates are
    probed with a short Telnet connect and dead ones are skipped.
    """

    def process_request(self, request, spider):
        if spider.name != 'dongguan' or 'ProjectInfo.aspx?new=1' in request.url:
            return
        while True:
            candidate = random.choice(PROXY)
            host = candidate.split(':')[0]
            port = candidate.split(':')[1]
            try:
                # Cheap liveness probe — just open a TCP connection.
                telnetlib.Telnet(host, port, timeout=3)
            except Exception as exc:
                print(candidate, exc)
            else:
                if request.url.startswith("http://"):
                    request.meta['proxy'] = 'http://' + candidate
                else:
                    request.meta['proxy'] = 'https://' + candidate
                return


class KuaidailiSfpmProxyMiddleware(object):
    """Two-tier proxying for the sfpm spider.

    Listing pages ('item_list') use authenticated IPs popped from Redis;
    all other sfpm requests use a random proxy from the dailiyun server,
    with exponential back-off while that server is unreachable.
    """

    def process_request(self, request, spider):
        if spider.name != 'sfpm':
            return
        if 'item_list' in request.url:
            _, ip = spider.redis_conn.brpop('proxy', timeout=3 * 24 * 60 * 60)
            proxies = {
                "http": "http://hifo:hifo1996@{}:65500".format(ip),
                "https": "https://hifo:hifo1996@{}:65500".format(ip)
            }
            if request.url.startswith("http://"):
                request.meta['proxy'] = proxies['http']
            else:
                request.meta['proxy'] = proxies['https']
            return
        dailiyun_server = 'http://192.168.5.233:5555/random'
        retries = request.meta.get('retry_times', 0)
        try:
            resp = requests.get(url=dailiyun_server)
            assert resp.status_code == 200
        # NOTE(review): bare except kept to preserve behaviour exactly.
        except:
            logger.error(f'{dailiyun_server}  代理服务器出错，获取代理失败')
            # Back off 1, 2, 4, ... minutes depending on retry count.
            time.sleep(60 * 2 ** retries)
        else:
            proxies = {
                "http": f"http://{resp.content.decode()}",
                "https": f"https://{resp.content.decode()}",
            }
            if request.url.startswith("http://"):
                request.meta['proxy'] = proxies['http']
            else:
                request.meta['proxy'] = proxies['https']


class YueyangProxyMiddleware(object):
    """Proxy yueyang requests via IPs popped from the dedicated Redis list.

    Unlike the other proxy middlewares, the popped IP is used as-is
    (no hifo authentication credentials are prepended).
    """

    def process_request(self, request, spider):
        if spider.name != 'yueyang':
            return
        # PROXY_YY_KEY names the Yueyang-specific proxy list in Redis.
        _, ip = spider.yy_redis_conn.brpop(PROXY_YY_KEY, timeout=30 * 60)
        proxies = {
            "http": "http://{}".format(ip),
            "https": "https://{}".format(ip)
        }
        if request.url.startswith("http://"):
            request.meta['proxy'] = proxies['http']
        else:
            request.meta['proxy'] = proxies['https']


class GuiyangMiddleware(object):
    """Force guiyang requests onto HTTPS.

    Fix: the previous ``_url.replace('http', 'https')`` replaced EVERY
    occurrence of 'http' in the URL — corrupting hosts, paths, or query
    values that contain the substring (e.g. a redirect parameter or a
    host like 'httpbin.org') — not just the scheme. Now only the leading
    'http://' prefix is rewritten.
    """

    def process_request(self, request, spider):
        if spider.name == 'guiyang':
            if request._url.startswith('http://'):
                # Rewrite only the scheme prefix, never the rest of the URL.
                request._url = 'https://' + request._url[len('http://'):]
