from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector

from codecs import decode
from talker.items import *

class VKSpider(CrawlSpider):
    """Crawl vk.com community search results and scrape each community page.

    Seeded with a single communities-search URL; the crawl rule below follows
    community links out of the results page and feeds them to parse_community.
    """

    name = "vk"
    allowed_domains = ["vk.com"]
    start_urls = [
        "http://www.vk.com/search?c[q]=fish&c[section]=communities"
    ]

    # Follow only links found inside the 'labeled title' section of the search
    # results (the element wrapping each community link) and parse each target
    # page with parse_community.
    rules = (
        Rule(SgmlLinkExtractor(allow_domains="vk.com",
                               restrict_xpaths='//div[@class="labeled title"]',
                               unique=True),
             callback='parse_community'),
        #Rule(SgmlLinkExtractor(allow_domains="vk.com", unique=True), callback='parse_community'),
    )

    def parse_community(self, response):
        """Scrape one community page: community metadata plus one item per post.

        Returns a list of PostItem, each pre-populated with the community's
        fields (PostItem is constructed from the CommunityItem).
        """
        self.log('Hi, this is a community page! %s' % response.url)

        selector = HtmlXPathSelector(response)

        community = CommunityItem()
        community['link'] = response.url
        community['title'] = selector.select('/html/head/title/text()').extract()

        panels = selector.select('//div[@class="ipanel"]')
        if panels:
            # Counters live in the first info panel; the description rows are
            # taken from anywhere on the page.
            panel = panels[0]
            community['desc'] = selector.select('//dl[@class="pinfo_row"]').extract()
            community['member_count'] = panel.select('.//em[@class="pm_counter"]/text()').extract()[0]
            # NOTE(review): hard-coded index assumes the 4th slim_header text
            # node holds the post count — confirm against the live page layout.
            community['post_count'] = panel.select('.//h4[@class="slim_header"]/text()').extract()[3]

        posts = []
        for post_sel in selector.select('//div[@class="post_item"]'):
            post = PostItem(community)
            post['desc'] = post_sel.select('.//div[@class="pi_text"]/text()').extract()
            post['like_count'] = post_sel.select('.//b[@class="v_like"]/text()').extract()
            #post['share_count'] = post_sel.select('.//span[@class="post_share_count fl_l"]')
            posts.append(post)

        return posts

"""    def parse(self, response):
        new_res = response.body.decode('cp1251')
        response = response.replace(body=new_res)
        filename = response.url.split("/")[-2]
        codecs.open(filename, 'wb', "utf-8").write(response.body.decode('cp1251'))
        print response.body.decode('cp1251')
"""

