# -*- coding: utf-8 -*-
import scrapy
import time
import re
from lxml import etree
from hxwz2Spider.items import Hxwz2SpiderItem
from scrapy.linkextractors import LinkExtractor

class Hxwz2Spider(scrapy.Spider):
	"""Crawl forum board 37 of hxwz2.com: one item per thread, following pagination.

	Yields ``Hxwz2SpiderItem`` objects with fields ``url``, ``title``,
	``img`` (list of image "file" attributes) and ``content`` (post HTML
	with the <td> cell rewritten as a <div>).
	"""
	name = 'hxwz2'
	allowed_domains = ['hxwz2.com']
	start_urls = ['http://www.hxwz2.com/forum-37-1.html']

	# Compiled once at class-definition time instead of per response;
	# captures the "file" attribute of <img> tags in the post body.
	IMG_PATTERN = re.compile(r'<img.*file="(.*?)".*>')

	def parse(self, response):
		"""Yield a detail request for every thread on a board page, then follow the next page.

		:param response: board listing page (forum-37-N.html).
		"""
		# Work directly on Selector objects with relative XPath instead of
		# re-parsing each anchor's HTML string through lxml.
		for link in response.xpath('//tbody[contains(@id,"normalthread")]/tr/th/a[@class="s xst"]'):
			item = Hxwz2SpiderItem()
			# Thread hrefs are relative (e.g. "thread-123-1-1.html");
			# urljoin makes them absolute so scrapy.Request accepts them.
			item['url'] = response.urljoin(link.xpath('./@href').get())
			item['title'] = link.xpath('./text()').get()
			yield scrapy.Request(url=item['url'], meta={'item': item}, callback=self.parse_detail)
			# NOTE: the original time.sleep(1) was removed — it blocks the
			# Twisted reactor and stalls every in-flight request. Throttle
			# with the DOWNLOAD_DELAY / AUTOTHROTTLE settings instead.

		next_page = response.xpath('//a[@class="nxt"]/@href').get()
		if next_page:
			# Pagination link may also be relative; resolve it the same way.
			yield scrapy.Request(url=response.urljoin(next_page), callback=self.parse)

	def parse_detail(self, response):
		"""Fill the item (passed via meta) with the thread's images and post HTML.

		:param response: thread detail page.
		:returns: the completed ``Hxwz2SpiderItem``.
		"""
		item = response.meta['item']
		content = response.xpath(r'//td[@class="t_f"]').get()
		if content is None:
			# Deleted thread or layout change: avoid TypeError from
			# findall(None); emit the item with whatever we already have.
			self.logger.warning('No post body found at %s', response.url)
			return item
		item['img'] = self.IMG_PATTERN.findall(content)
		# Normalize Windows newlines and rewrite the table cell as a <div>
		# so the fragment can be embedded outside a <table>.
		item['content'] = content.replace("\r\n", "\n").replace('<td', '<div').replace("/td>", "/div>")
		return item
