# -*- coding: utf-8 -*-
import scrapy
import os
import time
from Spider.items import A58UsedHouse
from Spider.Helper import FileHelper

class A58Spider(scrapy.Spider):
    """Spider scraping second-hand house listings from yancheng.58.com."""
    name = '58'
    allowed_domains = ['58.com']

    def start_requests(self):
        # Entry point: start at page 1 of the used-house listing.
        yield scrapy.Request('http://yancheng.58.com/ershoufang/0/pn1/', callback=self.parse)

    def parse(self, response):
        '''
        Parse a listing page and follow detail-page links.
        :param response: response for the listing page
        :return: yields Requests for detail pages
        '''
        links = response.xpath('//ul[@class="house-list-wrap"]//li/div[@class="list-info"]/h2//a/@href').extract()
        # Guard: an empty or restructured page would otherwise raise IndexError.
        if not links:
            return
        yield scrapy.Request(links[0], callback=self.parInfoPage)
        # NOTE(review): only the first listing is followed; enable the loop
        # below to crawl every detail page found on this listing page.
        # for link in links:
        #     time.sleep(1)
        #     yield scrapy.Request(link, callback=self.parInfoPage)

    def parInfoPage(self, response):
        '''
        Parse a detail page into an A58UsedHouse item.
        :param response: response for the detail page
        :return: the populated item
        '''
        # Base directory: current working directory of the crawl process.
        base_dir = os.path.abspath(os.getcwd())
        # Per-day save directory <cwd>/Files/<spider name>/YYYY/MM/DD.
        # os.sep keeps the path portable (original hard-coded Windows '\\').
        save_dir = base_dir + os.sep + 'Files' + os.sep + self.name + os.sep \
            + time.strftime(os.sep.join(('%Y', '%m', '%d')))
        filename = str(int(time.time())) + '.html'
        # FileHelper.createFile(save_dir, filename, response.body)  # save raw page

        item = A58UsedHouse()
        item['title'] = response.xpath("//div[@class='house-title']/h1/text()").extract_first()
        item['price'] = response.xpath("//span[@class='price']/text()").extract_first()
        # BUG FIX: original wrote item['room']['main'], which raises KeyError
        # because the 'room' field is unset; assign the extracted text directly.
        # (Also drops the duplicate 'price' assignment that followed.)
        item['room'] = response.xpath("//p[@class='room']/span[@class='main']/text()").extract_first()
        return item
