# -*- coding: utf-8 -*-
import sys 
reload(sys) 
sys.setdefaultencoding("utf-8") 
import re
import time
import scrapy
from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.http import Request, FormRequest
from facejoking.items import Profile
from scrapy.contrib.loader import ItemLoader

def fst(m):
    """Return the text captured by the first group of a regex match."""
    return m.groups()[0]

def log_page(response, filename):
    """Dump a response's URL, headers and body to *filename*, one per line.

    Debug helper: overwrites the file on every call.
    """
    with open(filename, 'w') as dump:
        for part in (response.url, response.headers, response.body):
            dump.write("%s\n" % part)

class facejokingSpider(scrapy.Spider) :
    name = "facejoking"
    #allowed_domains = ["http://www.facejoking.com/top/1002"]

    start_urls = ["file:///home/pr/Desktop/0.html",
                  "file:///home/pr/Desktop/1.html"]

    def parse(self, response):

        def get_gender(url):
                if url.split('/')[-1][0] == "0":
                        return u"女生"
                elif url.split('/')[-1][0] == "1":
                        return u"男生"
                else:
                        return ""
        
        log_page(response, "facejoking_after.html")

        print response.url

        # wait for response

        n = len(response.xpath('//div[@id="ColumnContainer"]/div'))
        print n

        for i in range(2, n + 1):
                pfx = '//div[@id="ColumnContainer"]/div[' + str(i) + ']'

                l = Profile()
                l['school'] = u'\u5317\u4eac\u5927\u5b66'
                l['name'] = response.xpath(pfx + '/div[@class="description"]/span[1]/a[1]/text()').extract()[0]
                l['numberOfFriends'] = "0"
                l['numberOfViews'] = "0"
                l['gender'] = get_gender(response.url)
                l['ID'] = "00000000000000000" + str(i)
	        l['image_urls'] = response.xpath(pfx + '/a[1]/@href').extract()
                l['face_rate'] = response.xpath(pfx + '/div[@class="top-title"]/text()').extract()[0][3:]
                yield l

        for url in self.start_urls:
                scrapy.Request(url, callback=self.parse)
