# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

import datetime
import requests
import datetime
import threading
import random
import execjs
import time
from bs4 import BeautifulSoup
from scrapy import signals
from BashouScrapy.wenshu.captcha import parse_captcha
from BashouScrapy.spiders.wenshu_list import ListContentHeader
from BashouScrapy.wenshu import common
from BashouScrapy.wenshu.common import thread_local_data


class BashouscrapySpiderMiddleware(object):
    """Default Scrapy spider-middleware scaffold plus a response hook.

    Not all methods need to be defined. If a method is not defined,
    scrapy acts as if the spider middleware does not modify the
    passed objects.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    def process_response(self, request, response, spider):
        # Downloader-middleware style hook.  Scrapy requires this to
        # return a Response (or Request) for EVERY response; the original
        # code implicitly returned None for any non-2xx status, which
        # Scrapy treats as a middleware error.
        # BUG FIX: always pass the response through; downstream code can
        # decide what to do with non-2xx statuses.
        return response

# Proxy (commented-out example downloader middleware)
# class ProxyMiddleware(object):
#     logger = logging.getLogger(__name__)
#     def process_request(self,request, spider):
#         self.logger.debug("Using Proxy")
#         request.meta['proxy'] = 'http://127.0.0.1:9743'
#         return None

class myCustomMiddleware(object):
    """Downloader middleware for the wenshu spiders.

    Outgoing: rewrites ``request.callback`` into the encoded POST body
    (via the sibling ``get_formdata``) and attaches the static WAF
    cookie plus the current ``vjkl5`` value.
    Incoming: inspects the body for the site's anti-crawler pages and,
    when one is detected, rotates the cookie and returns the original
    request so Scrapy retries it.
    """

    def process_request(self, request, spider):
        print(request)
        # NOTE(review): request.callback is (ab)used here as the carrier
        # for the form data; get_formdata turns it into the encoded body.
        request.callback = get_formdata(request.callback)
        # request['cookies'] = {"vjkl5": ListContentHeader.cookie}
        # The long static prefix is required by the site's WAF; only the
        # trailing vjkl5 value changes between sessions.
        request.headers['Cookie'] = "__utmz=61363882.1507538011.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); _gscu_125736681=09075707s5tjxb15; _gscu_2116842793=075364697cyz3y17; _gscbrs_2116842793=1; _gscbrs_125736681=1; Hm_lvt_9e03c161142422698f5b0d82bf699727=1512015786,1514167243,1514167352,1514167443; Hm_lpvt_9e03c161142422698f5b0d82bf699727=1514167477; ASP.NET_SessionId=fme51b0keu0nippqyb55zv2j; wafenterurl=L2xpc3QvbGlzdC8/c29ydHR5cGU9MQ==; wafcookie=e25bd6744e0f39edc602af88ef1b920b; __utmc=61363882; wafverify=4056bce1885c38dd76caabe56e13c06e; __utma=61363882.1715024987.1507538011.1515120062.1515486097.6; Hm_lvt_3f1a54c5a86d62407544d433f6418ef5=1515632034,1515741880; ccpassport=4b6607ef167c5a758521d1fa53fad190; wzwsconfirm=43b2eb346b6d6eb7957ac41b469aa261; wzwsvtime=1516172534; wzwstemplate=Nw==; wzwschallenge=V1pXU19DT05GSVJNX1BSRUZJWF9MQUJFTDcxNDM0NDk=; Hm_lpvt_3f1a54c5a86d62407544d433f6418ef5=1516172521; _gscs_2116842793=t161714665eimnt56|pv:4; vjkl5=" + ListContentHeader.cookie
        print(request.callback)

    def process_response(self, request, response, spider):
        """Detect anti-crawler responses; return the request to retry."""
        http_code = response.status
        if spider.name != "wenshu_list":
            # BUG FIX: a downloader middleware must return a Response or
            # Request for every spider; previously this fell through and
            # returned None for spiders other than "wenshu_list".
            return response
        result = response.text
        if result == '\'"remind key"\'':
            # Server signalled a stale cookie: rotate it and retry.
            common.increase_change_cookie_count()
            return request
        elif result.find('Html_Pages/VisitRemind.html') != -1 or result.find('remind') != -1:
            # "Visit remind" page: solve the captcha, then retry.
            print(str(datetime.datetime.now()) + "处于in remind状态并获取验证码")
            captcha_result = parse_captcha(thread_local_data)
            if captcha_result == '2':
                common.increase_change_cookie_count()
            return request
        elif result.find('302 Found') != -1 or http_code != 200 or result.find("noscript") != -1:
            common.increase_change_cookie_count()
            return request
        elif result.find('网站当前访问量较大，请输入验证码后继续访问') != -1:
            # "Site traffic is high, enter the captcha to continue."
            common.increase_change_cookie_count()
            return request
        else:
            return response

def init_cookies(header, cookie_type=1):
    """Ensure a thread-local requests.Session exists and the shared
    vjkl5 anti-crawler cookie is populated.

    :param header: unused; kept for call-site compatibility
                   (ListContentHeader.header is used directly).
    :param cookie_type: unused; kept for call-site compatibility.
    """
    print('init_cookie')
    # Lazily create one requests.Session per thread (thread_local_data
    # is a threading.local, so a missing attribute means "not created").
    if not hasattr(thread_local_data, 'session'):
        # requests.adapters.DEFAULT_RETRIES = 5
        thread_local_data.session = requests.Session()
        # thread_local_data.session.keep_alive = False

    # Fetch the vjkl5 cookie before issuing any real request.
    if ListContentHeader.cookie is None:
        thread_local_data.session.headers.update(ListContentHeader.header)
        for attempt in range(10):
            try:
                init_data = {'sorttype': '1', "conditions": None}
                SEARCH_PAGE = 'http://wenshu.court.gov.cn/list/list/'
                try:
                    thread_local_data.session.get(SEARCH_PAGE, params=init_data, timeout=20)
                except Exception as e:
                    # Network failure: just try again.
                    print(e)
                    continue
                ListContentHeader.cookie = thread_local_data.session.cookies['vjkl5']
                print('vjkl5:' + ListContentHeader.cookie)
                return
            except Exception as e:
                # Most likely a KeyError: the WAF did not hand out vjkl5.
                # After a few failures, force a redial to change IP.
                print('获取vjkl5失败，进行重播')  # "failed to get vjkl5, redialing"
                if attempt > 3:
                    already_redial = common.redial('redial')
                    if already_redial is True:
                        time.sleep(93)
    return

def get_guid():
    """Build a pseudo-GUID (8-4-8-12 lowercase hex digits), mimicking
    the site's JavaScript guid generator."""
    def hex4():
        # (1 + random()) * 0x10000 lies in [0x10000, 0x20000), so hex()
        # yields '0x1????'; stripping '0x1' leaves exactly 4 hex digits.
        return hex(int((1 + random.random()) * 65536) | 0)[3:]

    segments = [hex4() for _ in range(8)]
    return "{}{}-{}-{}{}-{}{}{}".format(*segments)

def get_number(guid, header, fail_flag, **kwargs):
    """Ask the server for the anti-crawler 'number' token for *guid*.

    Retries up to twice when the "visit remind" page is returned.

    :param guid: pseudo-GUID from get_guid().
    :param header: unused; ListContentHeader.header is used directly.
    :param fail_flag: sentinel value returned on any failure.
    :param kwargs: unused; kept for call-site compatibility (timeout=...).
    :return: the token string, or fail_flag on failure.
    """
    try:
        for i in range(2):
            print(str(datetime.datetime.now()) + "开始获取number,guid是：" + str(guid))
            resp = requests.post("http://wenshu.court.gov.cn/ValiCode/GetCode",
                                 data={'guid': guid}, headers=ListContentHeader.header)
            result = resp.text

            if result.find('Html_Pages/VisitRemind.html') != -1:
                # Hit the "visit remind" page: solve the captcha, retry.
                print(str(datetime.datetime.now()) + "第" + str(i) + "次处于in remind状态并获取验证码")
                parse_captcha(thread_local_data)
            elif result.find('302 Found') != -1 or resp.status_code != 200 or result.find("noscript") != -1:
                common.increase_change_cookie_count()
                return fail_flag
            elif result.find('网站当前访问量较大，请输入验证码后继续访问') != -1:
                common.increase_change_cookie_count()
                return fail_flag
            elif len(result) == 0:
                return fail_flag
            else:
                print("number:" + result)
                return result
        return fail_flag
    except Exception as e:
        # Network/parse failure: rotate the cookie and report failure.
        # (threading.current_thread() replaces the deprecated
        # currentThread() alias; str() output is identical.)
        print("Exception 线程" + str(threading.current_thread()) + str(e))
        common.increase_change_cookie_count()
        return fail_flag

def get_key(cookiestr):
    """Compute the vl5x anti-crawler key from the vjkl5 cookie value by
    executing the site's own JavaScript (sha1/md5/base64/vl5x.js).

    :param cookiestr: the vjkl5 cookie string.
    :return: the computed key string.
    """
    # key = execjs.compile(open("spiders/get_key.js").read()).call('getKey', cookiestr)
    js_files = ('spiders/sha1.js', 'spiders/md5.js',
                'spiders/base64.js', 'spiders/vl5x.js')
    js_parts = []
    for path in js_files:
        # Context managers guarantee the handles are closed even if a
        # read fails (the original leaked handles on error).
        with open(path) as fp:
            js_parts.append(fp.read())
    ctx = execjs.compile("".join(js_parts))
    key = ctx.call('vl5x', cookiestr)
    print('key:' + key)
    return key

def list_data_handle(formdata):
    """Build the fully-encoded POST body (with number/guid/vl5x
    appended) for a wenshu list request.

    :param formdata: dict of search parameters.
    :return: (post_data, header) on success, (None, None) on failure.
    """
    # init_proxy(thread_local_data)
    header = ListContentHeader.header
    try:
        init_cookies(header, 2)
        guid = get_guid()
        number = get_number(guid, header, False, timeout=30)
        # get_number returns the False sentinel (never a falsy string) on
        # failure, so an identity check is the precise test here.
        if number is False:
            # change_proxy(thread_local_data)
            common.increase_change_cookie_count()
            print('未获取到number')  # "number not obtained"
            return None, None
        post_data = common.post_data_encode(formdata) + '&number=' + number + '&guid=' + guid
        key = get_key(ListContentHeader.cookie)
    except Exception as error:
        print(error)
        return None, None
    if key is not None:
        post_data = post_data + '&vl5x=' + key
        post_data = common.get_param_data(post_data)
    return post_data, header

def get_formdata(formdata):
    """Retry list_data_handle forever until it yields a usable encoded
    POST body, logging and swallowing any exception along the way."""
    while True:
        try:
            postdata, _header = list_data_handle(formdata)
        except Exception as error:
            print(error)
            continue
        if postdata is not None:
            return postdata