import scrapy
from lxml import etree
import sys
sys.path.append(r'C:\Users\Administrator\spider2021\spider_xc\spider_xc\spiders')
from items import SpiderXcItem

class TestSpider(scrapy.Spider):
    """Crawl Ctrip's Beijing sight listings and scrape each sight's detail page.

    Flow: ``parse`` walks the paginated listing (pages 1..101), queuing one
    ``sub_parse`` request per sight; ``sub_parse`` extracts the detail fields
    and chains to the sight's photo page (``picture``) so a single complete
    item is emitted per sight.
    """

    name = 'test'
    # Registrable domain so both you.ctrip.com and any subdomain pass the
    # offsite filter. The original 'www.you.ctrip.com' did NOT match the
    # start URL's host (you.ctrip.com is not a subdomain of it).
    allowed_domains = ['you.ctrip.com']
    start_urls = ['https://you.ctrip.com/sight/beijing1.html']

    BASE_URL = 'https://you.ctrip.com'
    LAST_PAGE = '101'  # listing is known to end at page 101

    def parse(self, response):
        """Parse one listing page: queue every detail page, then the next listing page.

        :param response: listing-page response (e.g. /sight/beijing1/s0-pN.html)
        :yields: ``scrapy.Request`` for each sight detail page and for the
            next listing page (until page 101).
        """
        # response.xpath handles this directly; no need to re-parse
        # response.text through lxml.etree as the original did.
        current = response.xpath(
            "/html/body/div[4]/div/div[2]/div/div[3]/div[17]/div"
            "/a[@class='current']/text()"
        ).getall()
        if not current:
            # Pagination block missing (layout change / error page):
            # stop cleanly instead of raising IndexError.
            return

        detail_hrefs = response.xpath(
            "/html/body/div[4]/div/div[2]/div/div[3]/div[@class='list_mod2']"
            "/div[2]/dl/dt/a/@href"
        ).getall()
        # Scrape detail pages on EVERY page, including the last one —
        # the original skipped page 101's sights entirely.
        for href in detail_hrefs:
            yield scrapy.Request(self.BASE_URL + href,
                                 callback=self.sub_parse,
                                 dont_filter=True)

        if current[0] != self.LAST_PAGE:
            next_page = (f"{self.BASE_URL}/sight/beijing1/s0-p"
                         f"{int(current[0]) + 1}.html")
            yield scrapy.Request(next_page, callback=self.parse,
                                 dont_filter=True)

    def sub_parse(self, response):
        """Scrape one sight's detail page into a ``SpiderXcItem``.

        If the sight has a photo page, the partially-filled item is carried
        into :meth:`picture` via ``cb_kwargs`` so ONE complete item is
        emitted — the original yielded two fragments (one with
        ``picture='none'`` plus a separate title/picture-only item).

        :param response: detail-page response
        :yields: either a follow-up ``scrapy.Request`` to the photo page,
            or a finished item when no photo link exists.
        """
        item = SpiderXcItem()
        item['title'] = response.xpath('''//*[@id="__next"]/div[3]/div/div[3]/div[2]/div[1]/h1/text()''').getall()
        item['score'] = response.xpath('''//*[@id="__next"]/div[3]/div/div[3]/div[2]/div[2]/div/p[1]/text()''').getall()
        item['addr'] = response.xpath('''//*[@id="__next"]/div[3]/div/div[3]/div[2]/div[3]/div[1]/p[2]/text()''').getall()
        item['time'] = response.xpath('''//*[@id="__next"]/div[3]/div/div[4]/div[1]/div[2]/div/div[4]/text()''').getall()
        item['policy'] = response.xpath('''//*[@id="__next"]/div[3]/div/div[4]/div[1]/div[2]/div/div[6]/div[@class='moduleContentRow']/text()''').getall()
        item['service'] = response.xpath('''//*[@id="__next"]/div[3]/div/div[4]/div[1]/div[2]/div/div[8]/div[@class='moduleContentRow']/text()''').getall()
        item['tel'] = response.xpath('''//*[@id="__next"]/div[3]/div/div[3]/div[2]/div[3]/div[3]/p[2]/text()''').getall()
        item['info'] = response.xpath('''//*[@id="__next"]/div[3]/div/div[4]/div[1]/div[2]/div/div[2]/div/div/p/text()''').getall()
        item['traffic'] = response.xpath('''//*[@id="__next"]/div[3]/div/div[4]/div[1]/div[2]/div/div[6]/text()''').getall()
        item['tips'] = response.xpath('''//*[@id="__next"]/div[3]/div/div[4]/div[1]/div[2]/div/div[12]/div/p/text()''').getall()

        picture_url = response.xpath('''//*[@id="__next"]/div[3]/div/div[3]/div[1]/div[1]/div[2]/a/@href''').getall()
        if picture_url:
            # Hand the item to the photo-page callback; it emits the
            # finished item there.
            yield scrapy.Request(picture_url[0],
                                 callback=self.picture,
                                 cb_kwargs={'item': item},
                                 dont_filter=True)
        else:
            # No photo page (original would raise IndexError here):
            # emit the item with the 'none' sentinel the pipeline expects.
            item['picture'] = 'none'
            yield item

    def picture(self, response, item=None):
        """Attach the sight's photo URLs and emit the finished item.

        :param response: photo-page response
        :param item: item built by :meth:`sub_parse`; ``None`` keeps the
            old standalone behavior (title + pictures only) for backward
            compatibility.
        :yields: the completed ``SpiderXcItem``
        """
        if item is None:
            item = SpiderXcItem()
            item['title'] = response.xpath('''/html/body/div[2]/div[2]/div[1]/div[1]/h1/a/text()''').getall()
        item['picture'] = response.xpath('''/html/body/div[3]/div/div[1]/div[@class='item']/a[@class='itempic']/img/@src''').getall()
        yield item

