# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

import os
import random
import redis
import json
import time
import logging
from scrapy.exceptions import IgnoreRequest
from scrapy.utils.response import response_status_message
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy.http import HtmlResponse
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

from tools import yundama
from utils import common, errors
from user_agents import agents
from cookies import MyCookies

logging.getLogger("selenium").setLevel(logging.WARNING)  # silence selenium's noisy DEBUG/INFO output; keep WARNING and above


class UserAgentMiddleware(object):
    """Rotate the User-Agent header.

    Overwrites whatever User-Agent Scrapy would send with one picked at
    random from the project's ``agents`` pool, so successive requests do
    not all share the same fingerprint.
    """

    def process_request(self, request, spider):
        # Pick a fresh agent for every outgoing request.
        request.headers["User-Agent"] = random.choice(agents)


class CookiesMiddleware(RetryMiddleware):
    """Maintain the pool of weibo account cookies stored in Redis.

    On the way out, each request gets the cookies of a randomly chosen
    account; on the way back, redirects and weibo-specific markers are
    inspected to refresh expired cookies or drop locked accounts, and the
    request is retried.
    """

    def __init__(self, settings, crawler):
        RetryMiddleware.__init__(self, settings)
        if crawler.spider.name == "weibo":
            self.rconn = redis.StrictRedis(**settings["REDIS_CONN"])
            self.myCookies = MyCookies()
            # Seed Redis with cookies for every configured account.
            self.myCookies.initCookie(self.rconn, crawler.spider.name)

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: pass the crawler through so __init__ can
        # inspect the spider name.
        return cls(crawler.settings, crawler)

    def process_request(self, request, spider):
        """Attach the cookies of a random '<spider>:Cookies:<account>' Redis
        key to the request and remember the account in request.meta."""
        if spider.name != "weibo":
            return
        # Hoisted out of the loop: the marker is the same for every key.
        marker = bytes('{0}:Cookies'.format(spider.name), "utf8")
        redisKeys = self.rconn.keys()
        while redisKeys:
            elem = random.choice(redisKeys)
            if marker in elem:
                cookies = json.loads(common.bytes_to_str(self.rconn.get(elem)))
                request.cookies = cookies
                request.meta["accountText"] = str(elem, "utf8").split("Cookies:")[-1]
                spider.log.logger.info("Using cookies: {0}".format(request.meta["accountText"]))
                break
            # Not a cookie key for this spider -- discard and keep sampling.
            redisKeys.remove(elem)

    def process_response(self, request, response, spider):
        """Classify weibo's responses and retry with a refreshed cookie pool
        when the current cookie has expired or the account is blocked."""
        if spider.name != "weibo":
            return response

        if "retcode=6102" in response.url:
            # weibo's "cookie invalid" marker: refresh this account and retry.
            spider.log.logger.warning("One Cookie need to be updated:{0}".format(request.meta['accountText']))
            self.myCookies.updateCookie(request.meta['accountText'], self.rconn, spider.name)
            reason = "weibo retcode=6102"
            return self._retry(request, reason, spider) or response

        if response.status in [300, 301, 302, 303]:
            try:
                redirect_url = response.headers["location"]
                if b"login.weibo" in redirect_url or b"login.sina" in redirect_url:
                    # Bounced to a login page -> this account's cookie expired.
                    spider.log.logger.warning("One Cookie need to be updated...")
                    self.myCookies.updateCookie(request.meta['accountText'], self.rconn, spider.name)
                elif b"weibo.cn/security" in redirect_url:
                    # Security-check page -> the account itself is locked.
                    spider.log.logger.warning("One Account is locked! Remove it!")
                    self.myCookies.removeCookie(request.meta["accountText"], self.rconn, spider.name)
                elif b"weibo.cn/pub" in redirect_url:
                    spider.log.logger.warning(
                        "Redirect to 'http://weibo.cn/pub'!( Account:%s )" % request.meta["accountText"].split("--")[0])
                reason = response_status_message(response.status)
                return self._retry(request, reason, spider) or response
            except Exception as e:
                # FIX: keep the original drop-the-request behaviour, but log
                # the cause instead of discarding it silently.
                spider.log.logger.error("Error while handling redirect: {0}".format(e))
                raise IgnoreRequest from e

        if response.status in [403, 414]:
            # Hard block from weibo: halt so the operator can intervene
            # ("pause" is a Windows shell builtin -- TODO confirm the target OS).
            spider.log.logger.error("%s! Stopping..." % response.status)
            os.system("pause")
            # FIX: a downloader middleware must not return None from
            # process_response; hand the response back once resumed.
            return response

        return response


class JSPageMiddleware(RetryMiddleware):
    """Render weibo pages in a shared Selenium browser (the site is
    JS-heavy) and solve captchas through the yundama service when one
    appears on the rendered page."""

    def __init__(self, settings, crawler):
        RetryMiddleware.__init__(self, settings)
        self.settings = settings
        # Captcha type for yundama, e.g. 1004 = 4 alphanumeric characters.
        # Different types have different pricing/accuracy -- fill in the
        # exact one: http://www.yundama.com/price.html
        self.codetype = 1004
        if crawler.spider.name == "weibo":
            # Open the weibo domain first so add_cookie() is allowed later.
            crawler.spider.browser.get("http://s.weibo.com/")

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings, crawler)

    def process_request(self, request, spider):
        """Load the URL in the Selenium browser and short-circuit the
        downloader by returning the rendered HTML directly."""
        if spider.name == "weibo" and "weibo" in request.url:
            for key, value in request.cookies.items():
                spider.browser.add_cookie({'name': key, 'value': value})
            spider.browser.get(request.url)
            # Random delay to look less like a bot.
            time.sleep(random.uniform(3, 5))
            try:
                # Wait until the comment text nodes are rendered -- they are
                # the last thing the page updates.
                WebDriverWait(spider.browser, 10).until(
                    EC.presence_of_element_located((By.CSS_SELECTOR, ".comment_txt")))
            except Exception as e:
                # Best-effort: log the timeout and return whatever rendered.
                spider.log.logger.error(str(e))
            return HtmlResponse(url=spider.browser.current_url, body=spider.browser.page_source,
                                encoding="utf-8", request=request)

    def process_response(self, request, response, spider):
        """If a captcha input exists on the rendered page, screenshot it,
        send the cropped captcha image to yundama, type the answer and
        submit; then return the freshly rendered page."""
        if spider.name != "weibo":
            return response
        try:
            code = spider.browser.find_element_by_xpath('//input[@node-type="yzm_input"]')
            code.clear()
        except Exception:
            # No captcha on the page -- the common case.
            return response
        try:
            path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "captcha")
            # FIX: Scrapy Settings are accessed by key, not attribute --
            # `self.settings.DEBUG` raised AttributeError so this whole
            # branch previously died in the except below and the captcha
            # solver never ran.
            if self.settings["DEBUG"]:
                filename1 = os.path.join(path, "aa.png")
                filename2 = os.path.join(path, "ab.png")
                filename3 = os.path.join(path, "ac.png")
            else:
                # FIX: join paths instead of raw string concatenation so a
                # missing trailing separator in ROOT_DIR cannot corrupt the
                # file names.
                root = self.settings["ROOT_DIR"]
                filename1 = os.path.join(root, "aa.png")
                filename2 = os.path.join(root, "ab.png")
                filename3 = os.path.join(root, "ac.png")

            spider.browser.save_screenshot(filename1)
            from PIL import Image  # local import: only needed when a captcha shows up
            img = spider.browser.find_element_by_xpath('//img[@node-type="yzm_img"]')
            x = img.location["x"]
            y = img.location["y"]
            im = Image.open(filename1)
            im.crop((x, y, 104 + x, y + 30)).save(filename2)  # cut the captcha out of the full screenshot

            ydm = yundama.YDMHttp(**self.settings["YUNDAMA_ACCOUNT"])
            code_txt = ydm.identifyCaptcha(filename2, self.codetype)
            code.send_keys(code_txt)
            spider.browser.save_screenshot(filename3)  # "after typing" screenshot kept for debugging
            button = spider.browser.find_element_by_xpath('//a[@node-type="yzm_submit"]')
            button.click()
        except Exception as e:
            # Best-effort: captcha solving failed; still return the page.
            spider.log.logger.error(str(e))
        return HtmlResponse(url=spider.browser.current_url, body=spider.browser.page_source,
                            encoding="utf-8", request=request)