#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Project: spd-sxmcc
"""
@author: lyndon
@time Created on 2018/11/29 10:24
@desc
"""

import urllib2

# from spidermanager.header_switch import HeadersSelector
from pyquery import PyQuery as pyq
from pyspider.libs.base_handler import *

from com.teradata.laccelllatitude.header_switch import HeadersSelector
from ippool.lib.pooldao import IpPool

# from spidermanager.proxy_ip_pool import IpPool

# Base URL of the Taiyuan (ty) 5i5j site. Hrefs scraped from index pages are
# site-relative, so this prefix is prepended before crawling them.
url_prefix = 'https://ty.5i5j.com'

# Browser-like request headers for ty.5i5j.com.
# NOTE: HTTP header values must be strings — the original dict carried the
# int 1 for Upgrade-Insecure-Requests, which urllib2/httplib rejects if this
# dict is ever passed as request headers.
header_5i5j = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,pl;q=0.7",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Host": "ty.5i5j.com",
    "Referer": "https://ty.5i5j.com/xiaoqu/",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"
}


class Handler(BaseHandler):
    """Crawl 5i5j residential-compound ("xiaoqu") listings for Taiyuan.

    on_start seeds one listing-index URL per city; index_page walks the
    paginated index and schedules one detail crawl per compound;
    detail_page extracts the average price and per-listing facts.

    Pages are fetched twice by design: pyspider's crawl() drives
    scheduling/dedup/callbacks, while the actual HTML that gets parsed is
    re-fetched through a random proxy from IpPool via urllib2.
    """

    crawl_config = {
        "user_agent": "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
        "timeout": 120,
        "connect_timeout": 60,
        "retries": 5,
        "fetch_type": 'js',
        "auto_recrawl": True,
    }

    def _fetch_via_proxy(self, url):
        """Fetch *url* through a random proxy from the pool; return raw bytes.

        Extracted from the (duplicated) bodies of index_page/detail_page.
        The response object is closed explicitly — the original leaked it.
        """
        proxy_ip_port = IpPool().select_random_proxy()
        proxy_handler = urllib2.ProxyHandler(
            {"http": "%s" % proxy_ip_port, "https": "%s" % proxy_ip_port})
        opener = urllib2.build_opener(proxy_handler)
        resp = opener.open(url, timeout=20)
        try:
            return resp.read()
        finally:
            resp.close()

    @every(minutes=24 * 60)
    def on_start(self):
        # One entry point per city subdomain; only Taiyuan ('ty') for now.
        citys = ['ty']
        for each_city in citys:
            header = HeadersSelector().select_header()
            url = 'https://%s.5i5j.com/xiaoqu/' % (each_city)
            self.crawl(url, save={'url': url}, callback=self.index_page,
                       validate_cert=False, headers=header,
                       fetch_type='js', retries=5)

    @config(age=24 * 60 * 60)
    def index_page(self, response):
        """Parse one index page: schedule detail crawls and the next page."""
        url = response.save['url']
        html = self._fetch_via_proxy(url)

        py_html = pyq(html, parser='html')
        xiaoqu_link_list = py_html('.listTit').items()
        # Site-relative href of the "next page" control, or None on the
        # last page of the listing.
        next_page_url = py_html('.cPage').attr.href

        for link in xiaoqu_link_list:
            detail_href = link('a').attr.href  # site-relative, e.g. /xiaoqu/...
            if not detail_href:
                continue
            header = HeadersSelector().select_header()
            self.crawl(url_prefix + detail_href, callback=self.detail_page,
                       validate_cert=False, fetch_type='js', headers=header,
                       save={'name': link.text(), 'url': detail_href},
                       retries=5)

        # BUG FIX: the original crawled the *relative* next-page href (while
        # saving the absolute one) and raised TypeError on the last page
        # where the href is None. Guard and crawl the absolute URL.
        if next_page_url:
            next_abs = url_prefix + next_page_url
            self.crawl(next_abs, save={'url': next_abs},
                       callback=self.index_page,
                       validate_cert=False, fetch_type='js', retries=10)

    @config(priority=2)
    def detail_page(self, response):
        """Extract compound name, average price and listing facts.

        Returns a dict with keys url/title/name/xqjprice/xqf_lst — the
        record pyspider persists for this page.
        """
        name = response.save['name']
        detail_url = url_prefix + response.save['url']
        html = self._fetch_via_proxy(detail_url)

        py_html = pyq(html, parser='html')
        xqjprice = py_html('.xqjprice > span').text()  # average price text
        # One entry per listing row under the compound; encoded to UTF-8
        # bytes as the original did (Python 2 str).
        xqf_lst = [li.text().encode('utf-8')
                   for li in py_html('.xqfangs > ul > li').items()]
        return {
            "url": response.url,
            "title": response.doc('title').text(),
            "name": name,
            "xqjprice": xqjprice,
            "xqf_lst": xqf_lst,
        }
