import re
import time

import requests
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy.http import HtmlResponse
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
import random
import logging
from scrapy.utils.python import global_object_name

from commonresources.inner_utils.ip_proxy_utils import get_single_ip

logger = logging.getLogger(__name__)

# Spider names whose requests should be routed through a proxy IP
# (consumed by IP_DaiLi.process_request below).
spider_name_of_need_ip = [
    # "HeNanShengGongGongZiYuanJiaoYiZhongXinMenHuWangZhan",
    "ShanDongShengCaiGouYuZhaoBiaoWang",
    "ShanDongShengGongGongZiYuanJiaoYiWang",
    "BeiJingShiJianSheGongChengXinXiWang",
    'AnHuiShengJiaoTongYunShuTing',
    "ShanXiGongGongZiYuanJiaoYiZhongXin"
]
# Spiders that should get a fresh proxy on *every* request, bypassing the
# time-based throttle in IP_DaiLi. Currently empty.
spider_name_of_need_ip_one = [

]

# Per-spider fixed sleep in seconds, applied before every request by
# AbsoluteSleepMiddleware.
spider_delay_dict = {
    "ShanDongShengCaiGouYuZhaoBiaoWang": 10,
}


class IP_DaiLi(object):
    """Downloader middleware that attaches a proxy to requests of the spiders
    listed in ``spider_name_of_need_ip``.

    A new proxy is fetched from ``get_single_ip()`` at most once every
    ``refresh_interval`` seconds, except for spiders listed in
    ``spider_name_of_need_ip_one``, which trigger a fresh proxy on every
    request.

    NOTE(review): only the request that triggers a refresh gets
    ``meta['proxy']`` set; requests made inside the refresh window carry no
    proxy at all. If the intent was to reuse the last proxy for those
    requests, the proxy value would need to be cached — confirm with the
    crawl's owner before changing.
    """

    def __init__(self, refresh_interval=150):
        # Timestamp (epoch seconds) of the last proxy refresh; 0 = never.
        self.before_time = 0
        # Minimum number of seconds between two proxy refreshes.
        # Generalized from the previously hard-coded 150.
        self.refresh_interval = refresh_interval

    def process_request(self, request, spider):
        if spider.name in spider_name_of_need_ip:
            now = time.time()
            if (not self.before_time
                    or now - self.before_time > self.refresh_interval
                    or spider.name in spider_name_of_need_ip_one):
                self.before_time = now
                request.meta['proxy'] = get_single_ip()


class AbsoluteSleepMiddleware(object):
    """Blocks for a fixed, per-spider number of seconds before each request.

    The delay is looked up in the module-level ``spider_delay_dict``; spiders
    without an entry pass through without sleeping.
    """

    def process_request(self, request, spider):
        delay = spider_delay_dict.get(spider.name)
        if delay is not None:
            time.sleep(delay)


class IpDaily10MinAgent:
    """Proxy-pool client middleware.

    Talks to a proxy-pool HTTP service (http://10.101.2.114:5010) and keeps a
    local blacklist of proxy ports that proved unusable. Per-request proxy
    assignment is currently disabled (``process_request`` is a no-op); the
    commented-out code shows the intended rotation logic.
    """

    def __init__(self):
        # Ports of proxies that failed; matched (in the disabled rotation
        # code) against ``proxy.split(":")[-1]``.
        self.ip_ignore = []

    def error_back(self, response):
        # BUGFIX: was ``.split()`` (whitespace split), which appended a *list*
        # to ip_ignore, so the string comparison ``proxy.split(":")[-1] not in
        # self.ip_ignore`` could never match. Store the port string instead.
        ip = response.meta['ip_address'].split(":")[-1]
        self.ip_ignore.append(ip)
        print("目前无用的ip有{0},这里应该是要访问代理服务器删除该ip".format(self.ip_ignore))

    def get_proxy(self):
        # Fetch one proxy from the pool service. Timeout added so a dead
        # pool service cannot hang the crawl indefinitely.
        return requests.get("http://10.101.2.114:5010/get/", timeout=10).json()['proxy']

    def get_proxies_all(self):
        # Fetch the full proxy list from the pool service.
        return requests.get("http://10.101.2.114:5010/get_all/", timeout=10).json()['proxy']

    def delete_proxy(self, proxy):
        """Intended to be called from error_back; deletion currently disabled."""
        pass
        # requests.get("http://10.101.2.114:5010/delete/?proxy={}".format(proxy))

    def process_request(self, request, spider):
        # Proxy rotation is currently disabled; intended logic kept below.
        pass
        # count = 0
        # while 1:
        #     count += 1
        #     proxy = self.get_proxy()
        #     if proxy.split(":")[-1] not in self.ip_ignore:
        #         print(f"试了{count}次，终于请求到了不同的ip：{proxy}")
        #         break

        # ip_port = "180.123.204.60:20799"
        # request.meta['proxy'] = f"http://{ip_port}"
        #
        # request.meta['proxy'] = f"http://{proxy}"
        # request.meta['proxy'] = "http://" + random.choice(proxy_pool)


class CookieFailDownloaderMiddleare(RetryMiddleware):
    """RetryMiddleware variant that re-establishes the session cookie before
    re-issuing a failed request.

    For the ZheJiangShengGongGongZiYuanJiaoYiPingTai spider, a fresh
    ``SERVERID`` cookie is fetched from the portal home page and spliced into
    the retried request's Cookie header; all other spiders get stock scrapy
    retry behaviour.
    """

    def _retry(self, request, reason, spider):
        retries = request.meta.get('retry_times', 0) + 1

        # Per-request override of the global retry cap.
        retry_times = self.max_retry_times
        if 'max_retry_times' in request.meta:
            retry_times = request.meta['max_retry_times']

        stats = spider.crawler.stats
        if retries <= retry_times:
            logger.debug("Retrying %(request)s (failed %(retries)d times): %(reason)s",
                         {'request': request, 'retries': retries, 'reason': reason},
                         extra={'spider': spider})
            retryreq = request.copy()
            retryreq.meta['retry_times'] = retries
            retryreq.dont_filter = True
            retryreq.priority = request.priority + self.priority_adjust

            if isinstance(reason, Exception):
                reason = global_object_name(reason.__class__)

            # .get(): 'cookie_serverid' may be absent on the first retry;
            # direct indexing here could KeyError and kill the retry.
            print(f"aaaaaaa之前的cookie_serverid:{request.meta.get('cookie_serverid')}")
            print(f"之前的headers:{request.headers}")
            if spider.name == "ZheJiangShengGongGongZiYuanJiaoYiPingTai":
                # Hit the portal home page to obtain a fresh SERVERID cookie.
                # Timeout added so a hung refresh cannot stall the crawl.
                res = requests.get(url="http://www.zjpubservice.com/",
                                   headers={
                                       "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
                                                 ",application/signed-exchange;v=b3;q=0.9",
                                       "Accept-Encoding": "gzip, deflate",
                                       "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
                                       "Host": "www.zjpubservice.com",
                                       "Proxy-Connection": "keep-alive",
                                       "Upgrade-Insecure-Requests": "1",
                                       "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
                                   },
                                   timeout=30)
                cookie_serverid = re.findall(r'HttpOnly, (.*?);Path', res.headers['Set-Cookie'])[0]
                # BUGFIX: the original assignment ended with a stray trailing
                # comma, so the Cookie header was set to a 1-tuple instead of
                # a plain string.
                retryreq.headers["Cookie"] = f"HttpOnly; JSESSIONID=A2705C89D6356F3EC1B904A3FC5CAFA6; HttpOnly; " \
                                             f"oauthClientId=demoClient; oauthPath=http://223.4.69.84:8080/EpointWebBuilder;" \
                                             f" oauthLoginUrl=http://127.0.0.1/membercenter/login.html?redirect_uri=; " \
                                             f"oauthLogoutUrl=; noOauthRefreshToken=1dc347b4d59250e0d344fa1a896e6808;" \
                                             f" noOauthAccessToken=bf3606f81125ad67d3fa1132ffc360dd; {cookie_serverid}"
                print(f"之后的cookie_serverid:{cookie_serverid}")
                print(f"之后的headers:{request.headers}")

            stats.inc_value('retry/count')
            stats.inc_value('retry/reason_count/%s' % reason)
            return retryreq
        else:
            stats.inc_value('retry/max_reached')
            logger.error("Gave up retrying %(request)s (failed %(retries)d times): %(reason)s",
                         {'request': request, 'retries': retries, 'reason': reason},
                         extra={'spider': spider})


class DropReferDownloaderMiddlewar(object):
    """Strips the Referer header for spiders whose target sites reject it,
    and refreshes the cache-busting ``_=`` query timestamp for one spider."""

    # Spiders whose requests must be sent without a Referer header.
    # Hoisted to a class-level frozenset: the original rebuilt a 20+-entry
    # list on every request and did O(n) membership tests.
    NO_REFERER_SPIDERS = frozenset([
        # "GuangDongShengQuanGuoGongGongZiYuanJiaoYiPingTai",
        'ShanDongShengGongGongZiYuanJiaoYiWang',
        'test_scrapy_demo',
        'ZheJiangShengGongGongZiYuanJiaoYiPingTai',
        "HangYeBaiQiang",
        'SuZhouGongGongZiYuanJiaoYiPingTai',
        'JiangSuZhengFuCaiGouWang',
        'XuZhouGongGongZiYuanJiaoYiPingTai',
        'YanChengShiGongGongZiYuanJiaoYiWang',
        'LianYunGangGongGongZiYuanJiaoYiWang',
        'HeBeiShengZhaoBiaoTouBiaoFuWuPingTai',
        'yangzhoushigonggongziyuanjiaoyifuwupingtai',
        'XuZhouShiGongGongZiYuanJianSheGongChengJiaoYiPingTai',
        'NanJingShiJianSheGongChengJiaoYiZhongXin',
        'JiangSuGongGongZiYuanJiaoYiWang',
        'JiangSuShengZhaoBiaoTouBiaoGongGongFuWuPingTai',
        'ZheJiangZhengFuCaiGouWang',
        'HuBeiShengGongGongZiYuanJiaoYiPingTai',
        'HuBeiShengDianZiZhaoTouBiaoJiaoYiPingTai',
        'BeiJingShiZhengFuCaiGouWang',
        'ZhongGuoZhaoBiaoTouBiaoFuWuPingTai',
        'JiangSuShengGongLuShuiLuJianSheShiChangXinYongXinXiFuWuXiTong',
        'JiangSuJianSheGongChengZhaoBiaoWang',
        'HangYeCompanyCount',
    ])

    # Suzhou list URL whose Referer must be dropped. NOTE(review): the Suzhou
    # spider is already in NO_REFERER_SPIDERS, so this check is redundant but
    # kept for behavioural parity.
    _SUZHOU_LIST_URL = ("http://www.szzyjy.com.cn/EpointWebBuilder/JyxxSearchAction.action?cmd=getList1&categorynum=003"
                        "&diqu=%E8%8B%8F%E5%B7%9E%E5%B8%82&xmmc=&zstype=&zblx=&starttime=&endtime="
                        "&siteguid=7eb5f7f1-9041-43ad-8e13-8fcb82ea831a&pageIndex=0&pageSize=15")

    def process_request(self, request, spider):
        if (spider.name == "SuZhouGongGongZiYuanJiaoYiPingTai"
                and "referer" in request.headers
                and request.url == self._SUZHOU_LIST_URL):
            del request.headers["referer"]
        if spider.name in self.NO_REFERER_SPIDERS and "referer" in request.headers:
            del request.headers["referer"]

        # Refresh the anti-cache "_=" query parameter with the current time
        # in centiseconds (float() around time.time() was redundant).
        if (spider.name == "JinChanDianZiZhaoTouBiaoZongHeJiaoYiPingTai2"
                and "id=" in request.url and "&_=" in request.url):
            new_url = request.url.split("&_=")[0] + "&_=" + str(int(time.time() * 100))
            request._set_url(new_url)


class SeleniumDownloaderMiddleware(object):
    """Renders pages through a spider-owned Selenium browser, when one exists.

    Spiders that expose a ``browser`` attribute get their responses replaced
    with the browser-rendered page source; all others pass through untouched.
    """

    def process_request(self, request, spider):
        # Pre-download hook (UA/proxy tweaks would go here); intentionally a no-op.
        pass

    def process_response(self, request, response, spider):
        """Return the Selenium-rendered page for browser-backed spiders.

        request  -- the request that produced ``response``
        response -- the downloader's response, returned as-is when the spider
                    has no ``browser``
        spider   -- the running Spider instance
        """
        if not hasattr(spider, 'browser'):
            return response
        spider.browser.get(url=request.url)
        time.sleep(2)  # crude fixed wait for the page's JS to render
        rendered = spider.browser.page_source
        return HtmlResponse(url=spider.browser.current_url, body=rendered,
                            encoding='utf-8', request=request)


# Pool of desktop Chrome User-Agent strings sampled at random by
# RandomUserAgent. NOTE(review): these UAs date to Chrome 19-22 (~2012);
# modern sites may flag them as stale — consider refreshing the pool.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
    "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
    "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
    "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
    "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
    "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
    "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
    "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
    "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
    "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]


class RandomUserAgent(UserAgentMiddleware):  # enable by adding to DOWNLOADER_MIDDLEWARES in settings
    """Assigns a random User-Agent from ``user_agent_list`` to each request.

    Uses ``headers.setdefault`` so a UA set explicitly on the request wins.
    (Removed the dead local ``f = 1`` and commented-out debug prints.)
    """

    def process_request(self, request, spider):
        ua = random.choice(user_agent_list)
        # setdefault: only fill in the UA when the request did not set one.
        request.headers.setdefault("User-Agent", ua)


class RandomDelayMiddleware(object):  # sun/12.4/random-delay module
    """Sleeps a random 0..RANDOM_DELAY seconds before each request of one
    specific spider (hard-coded below); all other spiders are unaffected."""

    def __init__(self, delay):
        # Upper bound (seconds, inclusive) for the random sleep.
        self.delay = delay

    @classmethod
    def from_crawler(cls, crawler):
        # BUGFIX: read from crawler.settings instead of crawler.spider.settings.
        # crawler.spider may not be bound yet when downloader middlewares are
        # instantiated, and spider custom_settings are merged into
        # crawler.settings anyway.
        delay = crawler.settings.get("RANDOM_DELAY", 10)
        if not isinstance(delay, int):
            raise ValueError("RANDOM_DELAY need a int")
        return cls(delay)

    def process_request(self, request, spider):
        if spider.name == "ShanDongShengShuiLiGongChengZhaoBiaoTouBiaoGongGongFuWuPingTai":
            delay = random.randint(0, self.delay)  # random delay in [0, self.delay]
            logging.debug("### random delay: %s s ###" % delay)  # log the chosen delay
            time.sleep(delay)
