# -*- coding: utf-8 -*-
# NOTE: Python 2 only. `reload(sys)` and `sys.setdefaultencoding` were removed
# in Python 3; this hack forces the default codec to UTF-8 process-wide.
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from pyquery import PyQuery as pq
import re
from scrapy.http import Request as sreq       # short alias used throughout the spider
from scrapy.http import FormRequest as fr
from girl.items.night import *                # storeItem, movieItem, blogItem, girlItem, workItem
from scrapy.shell import inspect_response
import requests as req
import time
import datetime
class NightSpider(CrawlSpider):
    """Spider for nightsnet.jp category search pages.

    For every shop found on a category result page it yields a ``storeItem``
    and schedules follow-up requests for the shop's movie list, blog diary,
    girl list and work schedule, each handled by a dedicated callback.
    """
    name = "night"
    start_urls = [
                  # "http://www.nightsnet.jp/h",
                  "http://www.nightsnet.jp/th/A3ShopSearch/?bt=1100050001",
                  "http://www.nightsnet.jp/n/A3ShopSearch/?bt=230005",
                  "http://www.nightsnet.jp/tt/A3ShopSearch/?bt=120003",
                  "http://www.nightsnet.jp/t/A3ShopSearch/?bt=150002:1500020002:1500020003:1500020005&pr=0:1000000",
                  # "http://www.nightsnet.jp/k",
                  "http://www.nightsnet.jp/cg/A3ShopSearch/?bt=180004&pr=0:1000000",
                  "http://www.nightsnet.jp/s/A3KodawariSearch/?search=shop&genre1=0006&bt=190006&bc=01",
                  "http://www.nightsnet.jp/q/A3ShopSearch/?bt=200004",
                  "http://www.nightsnet.jp/hr/A3ShopSearch/?bt=160004",
                  ]

    custom_settings = {
        # Keep a single request in flight to stay gentle on the site.
        "CONCURRENT_REQUESTS": 1,
    }

    def parse(self, response):
        """Parse one category result page.

        Yields a ``storeItem`` per shop plus follow-up Requests for the
        shop's movie list, diary, girl list and work schedule pages.
        """
        for shop in response.xpath("//div[@class='section resultList2 hot']"):
            # A fresh item per shop: a single shared instance would be
            # mutated after being yielded, corrupting earlier items.
            item = storeItem()
            sel = shop.xpath
            item["cover"] = sel(".//img/@src").extract_first()
            item["name"] = sel(".//div[@class='shop-name2']/a/text()").extract_first()
            item["content"] = sel(".//div[@class='exit-img']").extract_first()
            item["area"] = sel(".//div[@class='shop-area']").extract_first()
            item["desc"] = sel(".//div[@class='info_bold']").extract_first()
            yield item
            # Shop detail base URL; sub-pages hang off it.
            surl = response.urljoin(sel(".//div[@class='shop-name2']/a/@href").extract_first())
            yield sreq(surl + "A6ShopMovieList/", callback=self.parse1)
            yield sreq(surl + "A5GirlKeitaiDiaryList/", callback=self.parse2)
            yield sreq(surl + "A6GirlList/", callback=self.parse3)
            yield sreq(surl + "A7ShukkinYotei/", callback=self.parse6)

    def parse1(self, response):
        """Follow every movie-detail link on a shop's movie list page."""
        extractor = LinkExtractor(allow="A6ShopGirlDogaDetail")
        for link in extractor.extract_links(response):
            yield sreq(link.url, callback=self.parse5)

    def parse2(self, response):
        """Scrape blog-diary entries from a shop's diary list, following
        the "next page" link until pagination is exhausted."""
        x = response.xpath
        for entry in x("//table[@class='diary_photolay_tbl']"):
            item = blogItem()  # fresh item per entry (see parse)
            sel = entry.xpath
            item["gurl"] = sel(".//td[@class='diary_phototext_tbl']/p/a/@href").extract_first()
            item["cover"] = sel(".//img/@src").extract_first()
            item["time"] = sel(".//span[@class='diarytime']/text()").extract_first()
            item["title"] = sel(".//p[@class='diary_title']/text()").extract_first()
            item["url"] = response.url
            item["date"] = str(datetime.date.today())
            yield item
        next_url = x("//a[@title='next page']/@href").extract_first()
        if next_url:
            yield sreq(response.urljoin(next_url), callback=self.parse2)

    def parse3(self, response):
        """Follow the profile link of every girl on a shop's girl list."""
        for entry in response.xpath("//ul[@id='girl_list']/li"):
            url = entry.xpath(".//p[@class='girl_name']/a/@href").extract_first()
            yield sreq(response.urljoin(url), callback=self.parse4)

    def parse4(self, response):
        """Scrape a single girl's profile page into a ``girlItem``."""
        item = girlItem()
        # BUG FIX: original did `y = y.xpath` with `y` undefined (NameError);
        # the selector must come from the response.
        sel = response.xpath
        item["cover"] = sel("//ul[@id='thum']//a/@src").extract()
        item["name"] = sel("//p[@class='girl_name']/a/text()").extract_first()
        item["content"] = sel("//table[@id='p_data']").extract()
        item["surl"] = sel("//li[@id='m_top']/a/@href").extract_first()
        item["url"] = response.url
        item["date"] = str(datetime.date.today())
        yield item

    def parse5(self, response):
        """Scrape a movie-detail page into a ``movieItem``."""
        item = movieItem()
        item["url"] = response.url
        # .extract_first() so the item stores strings, not SelectorLists,
        # consistent with every other callback.
        item["movie"] = response.css("video::attr(src)").extract_first()
        item["cover"] = response.css("video::attr(poster)").extract_first()
        item["date"] = str(datetime.date.today())
        yield item

    def parse6(self, response):
        """Scrape a shop's work-schedule list into ``workItem``s."""
        for row in response.css("#shukkin_list li"):
            item = workItem()  # fresh item per row (see parse)
            # CSS has no [last()] pseudo-selector; positional selection
            # needs XPath. (Also removed a leftover ipdb breakpoint that
            # halted the crawl here.)
            item["status"] = row.xpath(".//tr[last()]/td[1]/text()").extract_first()
            item["gurl"] = row.css("a::attr(href)").extract_first()
            item["date"] = str(datetime.date.today())
            yield item