#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 7/12/13
"""
import codecs
from scrapy.selector import HtmlXPathSelector
from scrapy.spider import BaseSpider
from tutorial.items import DmozItem

__author__ = 'honghe'

class CnSpider(BaseSpider):
    """Spider for 17173.com that saves crawled pages re-encoded as UTF-8."""
    name = "17173"
    allowed_domains = ["17173.com"]
    start_urls = [
        "http://www.17173.com/"
    ]

    def parse(self, response):
        """Save the response body to disk with its charset rewritten to UTF-8.

        Detects the page's declared charset — either the HTML4
        ``<meta http-equiv="Content-Type" ...>`` form or the HTML5
        ``<meta charset=...>`` form — replaces the first occurrence of
        that charset name in the body with ``utf-8``, and writes the
        result to ``<second-to-last URL segment>.html`` encoded as UTF-8.

        :param response: the downloaded page (scrapy Response)
        :raises ValueError: if no charset declaration is found in <head>
        """
        target_encoding = 'utf-8'
        # Derive the output filename from the URL, e.g.
        # 'http://www.17173.com/' -> 'www.17173.com'.
        filename = response.url.split('/')[-2]
        hxs = HtmlXPathSelector(response)
        # HTML4-style declaration; translate() makes the @http-equiv
        # comparison case-insensitive.
        meta1 = hxs.select("/html/head/meta[translate(@http-equiv, 'CONTENT-TYPE', 'content-type')='content-type']")
        # HTML5-style declaration, see http://www.w3schools.com/tags/tag_meta.asp.
        meta2 = hxs.select('/html/head/meta[@charset]')
        # [^"]+ instead of greedy .+ so only the charset token is captured
        # and the match cannot overrun into later attributes of the tag.
        if meta1:
            charset = meta1.re('charset=([^"]+)"')[0]
        elif meta2:
            charset = meta2.re('charset="([^"]+)"')[0]
        else:
            raise ValueError('No charset declaration found in <head>.')
        # Replace only the first occurrence, i.e. the declaration itself.
        body_unicode = response.body_as_unicode().replace(charset, target_encoding, 1)
        # Context manager guarantees the file handle is closed; the
        # original left the handle from codecs.open(...).write(...) to GC.
        with codecs.open(filename + '.html', 'wb', target_encoding) as out:
            out.write(body_unicode)