#!/usr/bin/python
# coding: utf8

from scrapy.item import Item, Field


class TopbaoItem(Item):
    """Container for one Taobao search-result listing.

    Fields are populated by ``TaobaoItemSpider.parse`` below.
    """
    title = Field()  # listing title text
    price = Field()  # listing price
    saler = Field()  # seller name ("saler" [sic] kept — spider assigns this key)

#from urlparse import urljoin
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
#from scrapy.http import Request
from mybot.items import TopbaoItem


class TaobaoItemSpider(BaseSpider):
    """Spider scraping product listings from one Taobao search-result page.

    Yields a ``TopbaoItem`` per ``<li>`` entry in the bid-form result list.
    """
    name = "titems"
    allowed_domains = ["s.taobao.com"]
    # GBK percent-encoded search query baked into the start URL.
    url0 = 'http://s.taobao.com/search?q=%D6%F1%CC%BF%B3%FD%B3%F4%D0%AC%B5%E6&initiative_id=staobaoz_20120907'
    #url1 = 'http://s.taobao.com/search?q=%B3%FD%B3%F4%D0%AC%B5%E6&commend=all&ssid=s5-e&search_type=item&sourceId=tb.index'
    start_urls = [url0]

    def parse(self, response):
        """Parse the search-result page; yield one item per listing.

        :param response: the downloaded search-result page.
        """
        hxs = HtmlXPathSelector(response)
        # Iterate the selector objects directly.  The original called
        # .extract() and re-wrapped each HTML string in HtmlXPathSelector,
        # which expects a Response object, not a string.
        for li in hxs.select('//div[@id="list-content"]/form[@id="bid-form"]/ul[1]/li'):
            item = TopbaoItem()
            # Scrapy Items require dict-style access; attribute assignment
            # (item.title = ...) does not populate the declared fields.
            # Relative xpath: the original absolute '/h3/...' would search
            # from the document root, not inside this <li>.
            item['title'] = li.select('h3/a/text()').extract()
            # NOTE(review): the original reused the title xpath for price
            # and saler — the correct sub-paths still need to be filled in.
            # TODO confirm against the live page markup.
            item['price'] = li.select('h3/a/text()').extract()
            item['saler'] = li.select('h3/a/text()').extract()
            yield item

from os import path
from scrapy import signals
from scrapy.xlib.pydispatch import dispatcher


class MybotPipeline(object):
    """Pipeline that appends each scraped item, one per line, to a text file.

    The output file is opened when the scrapy engine starts and closed when
    it stops, wired up through dispatcher signals in ``__init__``.
    """
    filename = 'mylist.txt'

    def __init__(self):
        # File handle; stays None until the engine_started signal fires.
        self.f = None
        dispatcher.connect(self.open, signals.engine_started)
        dispatcher.connect(self.close, signals.engine_stopped)

    def process_item(self, domain, item):
        """Write ``str(item)`` as one line and pass the item through.

        :param domain: spider/domain the item came from (unused).
        :param item: the scraped item; returned unchanged so later
            pipeline stages still receive it.
        """
        self.f.write(str(item) + '\n')
        return item

    def open(self):
        # Mode 'a' creates the file when it is missing and appends when it
        # exists, so the original os.path.exists() branch was redundant.
        self.f = open(self.filename, 'a')

    def close(self):
        # Plain guard instead of a conditional expression used for its
        # side effect.  engine_stopped may fire even if open() never ran.
        if self.f is not None:
            self.f.close()


# Module-level spider instance — the convention very old scrapy releases
# used for spider discovery.  NOTE(review): later scrapy versions discover
# spider classes automatically; confirm the scrapy version this targets.
SPIDER = TaobaoItemSpider()
