import scrapy
from bs4 import BeautifulSoup as soup
import re
from beautifyer.items import BeautifyerItem

class BeautifySpider(scrapy.Spider):
    """Spider that crawls news articles from www.xiu123.cn.

    Flow: front page -> news section link -> listing pages 1..MAX_PAGE,
    following each article link to a detail page that yields one
    BeautifyerItem with title, timestamp and cleaned body text.
    """

    name = 'beautify'
    allowed_domains = ['www.xiu123.cn']
    start_urls = ['http://www.xiu123.cn']
    # Pagination URL template; the 1-based page number is appended.
    cross_page = start_urls[0] + '/news?page='
    # Last listing page to request (original bound: num < 14).
    MAX_PAGE = 13

    def parse(self, response):
        """Enter the news section from the site's front page."""
        dir_url = response.xpath(
            '/html/body/div[1]/div[2]/div/div/ul/li[3]/a/@href'
        ).extract_first()
        if not dir_url:
            # Guard: nav link missing — avoid TypeError on str + None.
            self.logger.warning('News section link not found on %s', response.url)
            return
        # urljoin handles both relative and absolute hrefs correctly.
        yield scrapy.Request(url=response.urljoin(dir_url),
                             callback=self.parse_title,
                             meta={'n': 1})

    def parse_title(self, response):
        """Extract article links from one listing page, then paginate."""
        num = response.meta['n']
        for title in response.xpath(
            '/html/body/div[2]/div/div[2]/div[2]/div[2]/div[1]'
            '/div[@class="left-pege"]'
        ):
            tit_url = title.xpath('.//a/@href').extract_first()
            if tit_url:  # skip listing entries without a link
                yield scrapy.Request(url=response.urljoin(tit_url),
                                     callback=self.parse_detail)
        num += 1
        if num <= self.MAX_PAGE:  # go to the next listing page
            yield scrapy.Request(url=self.cross_page + str(num),
                                 callback=self.parse_title,
                                 meta={'n': num})

    def parse_detail(self, response):
        """Extract title, timestamp and body text of one article page."""
        item = BeautifyerItem()
        art_title = response.xpath(
            '/html/body/div[2]/div/div/div[2]/div/div/div[1]/div[1]/h2/text()'
        ).extract_first()
        # extract_first() returns None when the xpath matches nothing;
        # guard before strip() to avoid AttributeError.
        art_title = art_title.strip() if art_title else ''
        art_time = response.xpath(
            '/html/body/div[2]/div/div/div[2]/div/div/div[1]/div[1]'
            '/div/p/span/text()'
        ).extract_first()
        content = response.xpath(
            '/html/body/div[2]/div/div/div[2]/div/div/div[1]/div[2]'
        )
        # string(.) concatenates all descendant text of the content node.
        parts = content.xpath('string(.)').extract()
        art_content = ''.join(part.strip() for part in parts)
        # Strip layout whitespace. The original also chained
        # .replace('"', '\"') etc., but those were no-ops ('\"' == '"'),
        # so they are dropped here without changing behavior.
        art_content = (art_content
                       .replace('\n', '')
                       .replace('\r', '')
                       .replace('\t', ''))

        item['art_title'] = art_title
        item['art_time'] = art_time
        item['art_content'] = art_content
        yield item
        
       