# -*- coding:UTF-8 -*-
import time

import scrapy
import json
from scrapy_splash import SplashRequest, SplashTextResponse
from Lactationer_Slave.util.redisClient import redisClient
from Lactationer_Slave.util import splashArgs
from Lactationer_Slave.items import LactationerSlaveItem


import base64

# Proxy server (Abuyun dynamic HTTP proxy)
proxyHost = "http-dyn.abuyun.com"
proxyPort = "9020"
# Proxy tunnel authentication credentials
# NOTE(review): credentials are hard-coded here — consider moving to settings/env.
proxyUser = "H7790120C0414TND"
proxyPass = "BA74FBC00340C7A3"

class Lactationer_Slave(scrapy.Spider):
    """Spider that drains detail-page URLs from a Redis queue and scrapes
    contact details through a Splash rendering service, routing every
    request through an Abuyun HTTP tunnel proxy.

    The flow is: ``start_requests`` polls Redis for the next effective URL,
    picks the Lua rendering script by URL type (58.com vs Ganji), and yields
    a :class:`SplashRequest`; ``parse_detail`` either records a failure back
    into Redis or emits a populated ``LactationerSlaveItem``.
    """

    name = 'Lactationer_Slave'
    # Scrapy's bootstrap needs at least one start URL; it only matters for
    # the very first startup — start_requests below drives the real crawl.
    start_urls = ['http://www.ganji.com/index.htm']

    def parse(self, response):
        """Bootstrap callback: hand control back to ``start_requests``.

        BUG FIX: the original built ``scrapy.Request(callback=...)`` with no
        URL — ``url`` is a required argument, so that call raised TypeError.
        We re-request the bootstrap URL instead (``dont_filter`` bypasses the
        duplicate filter so the request is not dropped).
        """
        print('-------------------start project-------------------------')
        yield scrapy.Request(response.url, callback=self.start_requests,
                             dont_filter=True)

    def start_requests(self):
        """Poll Redis for URLs, yielding one SplashRequest per URL.

        Loops until Redis hands back ``None`` (queue exhausted). The proxy
        URI and request headers are loop-invariant, so they are built once
        up front instead of on every iteration.
        """
        proxy_meta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
            "host": proxyHost,
            "port": proxyPort,
            "user": proxyUser,
            "pass": proxyPass,
        }
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
            "Proxy-Switch-Ip": "yes"
        }
        while True:
            r = redisClient()
            time.sleep(0.5)  # throttle Redis polling
            # NOTE(review): if getEffectiveUrl is a method rather than a
            # property, this is missing parentheses and would always be
            # truthy (never None) — confirm against redisClient.
            detail_url = r.getEffectiveUrl
            if detail_url is None:
                break  # queue exhausted — stop generating requests
            # 58.com ('wuba_info') URLs go straight to the contact-info
            # page; everything else is Ganji's own service-shop page.
            # Only the Lua rendering script differs between the two.
            if 'wuba_info' in detail_url:
                lua_source = splashArgs.SplashArgs.wuba_info
            else:
                lua_source = splashArgs.SplashArgs.fuwu_dian
            yield SplashRequest(
                detail_url,
                self.parse_detail,
                endpoint='execute',
                args={
                    "http_method": "GET",
                    'lua_source': lua_source,
                    "headers": headers,
                },
                meta={
                    "proxy": proxy_meta,
                },
            )

    # scrapy-splash: build the item from the rendered response
    def parse_detail(self, response):
        """Turn a Splash response into an item, or park the URL for retry.

        A plain :class:`SplashTextResponse` indicates the Lua script did not
        return structured data — the URL is pushed onto the Redis failure
        list to be re-requested later. Otherwise ``response.data`` (already
        a JSON-decoded object) is mapped onto a ``LactationerSlaveItem``.
        """
        print('splash response:', response)
        if isinstance(response, SplashTextResponse):
            # Failed request: save the URL to the failure list for retry.
            r = redisClient()
            r.saveFaild(response.url)
        elif response.data is not None:
            # response.data is already decoded JSON; the original
            # json.loads(json.dumps(...)) round-trip was a no-op — removed.
            data = response.data
            item = LactationerSlaveItem()
            item['title'] = data['1']
            item['phone'] = data['2']
            item['detail'] = data['3']
            item['city'] = data['4']
            item['link'] = response.url
            yield item

