# -*- coding:utf-8 -*-
import scrapy
import re
from ..items import FreebufScrapyItem

class Freebuf(scrapy.Spider):
	"""Spider that crawls article pages from freebuf.com's /articles/system listing."""
	name = 'freebuf'
	allowed_domains = ['freebuf.com']
	start_urls = ['http://www.freebuf.com/articles/system']

	def parse(self, response):
		"""Parse the article listing page and follow each article link.

		Yields one scrapy.Request per article entry; each is handled by
		parse_content.
		"""
		paper_list = response.xpath('//*[@id="timeline"]/div')
		for paper in paper_list:
			urls = paper.xpath('div[2]/dl/dt/a/@href').extract()
			# Some timeline divs carry no article link (e.g. separators);
			# skip them instead of raising IndexError on [0].
			if not urls:
				continue
			# urljoin makes the request robust to relative hrefs.
			yield scrapy.Request(response.urljoin(urls[0]), callback=self.parse_content)

	def parse_content(self, response):
		"""Extract author name, title and raw content HTML from one article page.

		Yields a FreebufScrapyItem whose fields are lists of UTF-8 encoded
		byte strings (the file targets Python-2-style pipelines).
		"""
		item = FreebufScrapyItem()
		name = response.xpath('//div[@class="title"]//span[@class="name"]/a/text()').extract()
		title = response.xpath('//div[@class="title"]/h2/text()').extract()
		content = response.xpath('//div[@id="contenttxt"]').extract()

		item['name'] = [n.encode('utf-8') for n in name]
		item['title'] = [t.encode('utf-8') for t in title]
		item['content'] = [c.encode('utf-8') for c in content]
		yield item