# -*- coding: utf-8 -*-
# NOTE(review): the reload()/setdefaultencoding dance below is a Python 2-only
# hack to force UTF-8 as the implicit str<->unicode codec; neither call exists
# in Python 3, so this module only runs under Python 2.
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import scrapy
# NOTE(review): LinkExtractor/Rule are only referenced from the commented-out
# `rules` block below; kept for when the rules are re-enabled.
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
# NOTE(review): pyquery, FormRequest, inspect_response, requests and time do
# not appear to be used in the visible code — presumably leftovers from
# debugging/earlier revisions; confirm before removing.
from pyquery import PyQuery as pq
import re
from scrapy.http import Request as sreq
from scrapy.http import FormRequest as fr
from girl.items.poke import *
from scrapy.shell import inspect_response
import requests as req
import time
import datetime
class PokeSpider(CrawlSpider):
    """Crawl pokepara.jp shop listings per prefecture.

    Flow:
      parse   -> prefecture listing pages (follows pagination), one request
                 per shop to its ``system.html`` detail page
      parse1  -> emits a ``clubItem`` per shop, then fans out to the shop's
                 girl gallery (parse2), blog index (parse5) and work
                 schedule (parse4)
      parse3  -> one ``girlItem`` per girl profile
      parse4  -> one ``workItem`` per shop (today's attendance list)
      parse6  -> one ``blogItem`` per blog entry
    """

    name = "poke"
    start_urls = ['https://www.pokepara.jp/_hokkaido/',
 'https://www.pokepara.jp/miyagi/',
 'https://www.pokepara.jp/fukushima/',
 'https://www.pokepara.jp/yamagata/',
 'https://www.pokepara.jp/iwate/',
 'https://www.pokepara.jp/aomori/',
 'https://www.pokepara.jp/akita/',
 'https://www.pokepara.jp/niigata/',
 'https://www.pokepara.jp/nagano/',
 'https://www.pokepara.jp/yamanashi/',
 'https://www.pokepara.jp/toyama/',
 'https://www.pokepara.jp/ishikawa/',
 'https://www.pokepara.jp/tokyo/',
 'https://www.pokepara.jp/kanagawa/',
 'https://www.pokepara.jp/saitama/',
 'https://www.pokepara.jp/chiba/',
 'https://www.pokepara.jp/gunma/',
 'https://www.pokepara.jp/tochigi/',
 'https://www.pokepara.jp/ibaraki/',
 'https://www.pokepara.jp/_shizuoka/',
 'https://www.pokepara.jp/aichi/',
 'https://www.pokepara.jp/gifu/',
 'https://www.pokepara.jp/mie/',
 'https://www.pokepara.jp/osaka/',
 'https://www.pokepara.jp/kyoto/',
 'https://www.pokepara.jp/hyogo/',
 'https://www.pokepara.jp/nara/',
 'https://www.pokepara.jp/shiga/',
 'https://www.pokepara.jp/wakayama/',
 'https://www.pokepara.jp/hiroshima/',
 'https://www.pokepara.jp/fukuoka/',
 'https://www.pokepara.jp/nagasaki/',
 'https://www.pokepara.jp/kumamoto/',
 'https://www.pokepara.jp/oita/',
 'https://www.pokepara.jp/miyazaki/',
 'https://www.pokepara.jp/kagoshima/',
 'https://www.pokepara.jp/_okinawa/']

    custom_settings = {
        # Crawl politely: a single in-flight request at a time.
        "CONCURRENT_REQUESTS": 1,
    }

    # Listing fields carried from the prefecture page to parse1 via meta.
    _META_KEYS = ("cover", "area", "name", "cate", "avgAge", "seatNum", "content")

    def parse(self, response):
        """Parse one prefecture listing page.

        Yields a Request to each shop's ``system.html`` (listing data in
        ``meta``) and follows the last pagination link, if any.
        """
        x = response.xpath
        for row in x("//div[@id='list']//li[@class='dx']"):
            sel = row.xpath
            store = {
                "cover": sel(".//div[@class='photo']/a/img/@src").extract_first(),
                "area": sel(".//div[@class='area_gyousyu']/text()").extract_first(),
                "name": sel(".//div[@class='shop_name']/a/text()").extract_first(),
                "cate": sel(".//div[@class='shop_name']/span/text()").extract_first(),
                "avgAge": sel(".//span[@class='ave']/text()").extract_first(),
                "seatNum": sel(".//span[@class='seat']/text()").extract_first(),
                "content": sel(".//table[@class='system']").extract_first(),
                # NOTE(review): "desc" is collected here but parse1 never copies
                # it into clubItem — confirm whether the item has a desc field.
                "desc": sel(".//span[@class='catch']").extract_first(),
            }
            shop_url = sel(".//div[@class='photo']/a/@href").extract_first()
            # The shop detail data lives on its "system" sub-page.
            yield sreq(shop_url + "system.html", callback=self.parse1, meta=store)
        next_url = x("//div[@id='paging']//a[last()]/@href").extract_first()
        if next_url:
            yield sreq(next_url, callback=self.parse)

    def parse1(self, response):
        """Emit a clubItem for the shop, then crawl its gallery/blog/schedule."""
        item = clubItem()
        # Copy only the listing fields we put into meta (meta also carries
        # Scrapy-internal keys such as depth/download slots).
        for key in self._META_KEYS:
            item[key] = response.meta[key]
        x = response.xpath
        item["address"] = x("//span[@class='address']//text()").extract_first()
        item["tel"] = x("//span[@class='tel']//text()").extract_first()
        base_url = response.url.replace("system.html", "")
        item["url"] = base_url
        item["date"] = str(datetime.date.today())
        yield item
        # Fan out to the shop's sub-sections.
        yield sreq(base_url + "gal", callback=self.parse2)
        yield sreq(base_url + "blog/", callback=self.parse5)
        yield sreq(base_url + "gal/?work=on", callback=self.parse4)

    def parse2(self, response):
        """Gallery index: request each girl's profile page."""
        for entry in response.xpath("//div[@class='main_wrap']/ul/li"):
            profile_url = entry.xpath(".//a/@href").extract_first()
            yield sreq(profile_url, callback=self.parse3)

    def parse3(self, response):
        """Girl profile page: emit one girlItem."""
        item = girlItem()
        x = response.xpath
        # Some profiles use a thumbnail strip, others a single main image.
        item["cover"] = x("//ul[@class='thumnail']//a/@href").extract()
        if not item["cover"]:
            item["cover"] = x("//ul[@class='main_image']//a/@href").extract()
        item["content"] = x("//div[@class='prof_right']").extract_first()
        item["sns"] = x("//ul[@class='sns']//a/@href").extract()
        item["url"] = response.url
        # Shop URL = profile URL with the trailing "gal/<id>/" removed.
        item["surl"] = re.sub(r"gal/\d+/", "", response.url)
        item["date"] = str(datetime.date.today())
        yield item

    def parse4(self, response):
        """Attendance ("work") page: emit one workItem per shop."""
        item = workItem()
        item["surl"] = response.request.url
        item["date"] = str(datetime.date.today())
        item["work"] = []
        for entry in response.xpath("//div[@class='main_wrap']/ul/li"):
            # Renamed from `time` — the original shadowed the stdlib module.
            attend_time = entry.xpath(
                ".//p[@class='honjitsu attend']/span/text()").extract_first()
            gurl = entry.xpath(".//a/@href").extract_first()
            item["work"].append({"gurl": gurl, "time": attend_time})
        yield item

    def parse5(self, response):
        """Blog index: request each entry and follow pagination."""
        x = response.xpath
        # Two observed layouts: id='blog_list' or class='gal_blog_list_li'.
        if not x("//ul[@id='blog_list']/li"):
            urls = x("//ul[@class='gal_blog_list_li']/li//a/@href").extract()
        else:
            urls = x("//ul[@id='blog_list']/li//a/@href").extract()
        if not urls:
            # BUGFIX: was `import ipdb;ipdb.set_trace()` — a debugger
            # breakpoint that would hang a headless crawl. Log and move on.
            self.logger.warning("no blog entries found on %s", response.url)
        for url in urls:
            yield sreq(url, callback=self.parse6)
        next_url = x("//div[@id='paging']/a[last()]/@href").extract_first()
        if next_url:
            yield sreq(next_url, callback=self.parse5)

    def parse6(self, response):
        """Blog entry page: emit one blogItem."""
        item = blogItem()
        x = response.xpath
        item["title"] = x("//div[@class='blog']/h2/a/text()").extract_first()
        item["time"] = x("//p[@class='time']/text()").extract_first()
        item["cover"] = x("//div[@class='text wordBreak']//a/@href").extract_first()
        # Girl URL = entry URL with the trailing "/blog/diary_..." removed.
        item["gurl"] = re.sub(r"/blog/diary_\S+", "", response.url)
        item["url"] = response.url
        item["date"] = str(datetime.date.today())
        yield item

