# -*- coding: utf-8 -*-
import sys 
reload(sys) 
sys.setdefaultencoding("utf-8") 
import re
import scrapy
from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from renren.utils import *
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.http import Request, FormRequest
from renren.items import Profile
from scrapy.contrib.loader import ItemLoader

def fst(m):
    """Return the text captured by the first group of regex match *m*."""
    return m.groups()[0]

def log_page(response, filename):
    """Dump a response's URL, headers and body to *filename* for debugging."""
    dump = "%s\n%s\n%s\n" % (response.url, response.headers, response.body)
    with open(filename, 'w') as out:
        out.write(dump)

class renrenSpider(scrapy.Spider):
    """Crawl renren.com profile pages, keeping only PKU students.

    The crawl frontier is a persistent file-backed queue (``fileQueue`` from
    renren.utils): newly discovered profile URLs are pushed, and URLs are
    popped once the profile is fully scraped, so the crawl is resumable.
    """

    name = "renren"
    allowed_domains = ["www.renren.com"]

    # Persistent frontier, seeded with root pages such as
    # http://www.renren.com/487565773/profile
    queue = fileQueue()
    start_urls = queue.get_all()

    def start_requests(self):
        # Profile pages require an authenticated session, so log in first.
        return [Request("http://www.renren.com/SysHome.do",
                        callback=self.post_login)]

    def post_login(self, response):
        """Fill in and submit the login form on the landing page."""
        log_page(response, "renren_login.html")
        # SECURITY NOTE: credentials are hard-coded; move them to Scrapy
        # settings or environment variables before sharing this code.
        return [FormRequest.from_response(
            response,
            formdata={
                'email': 'pkunetworklab1@163.com',
                'password': 'pku123456',
            },
            callback=self.after_login,
        )]

    def after_login(self, response):
        # Once logged in, schedule every queued profile URL for parsing.
        for url in self.start_urls:
            yield self.make_requests_from_url(url)

    def parse(self, response):
        """Scrape one profile page.

        Fills the quick fields available on the main profile page, enqueues
        every linked profile URL, then requests the detail ("edit info") page
        whose callback completes and yields the item.
        """
        if not is_matching_main_page(response.url):
            return

        log_page(response, 'renren_after.html')

        l = Profile()
        try:
            # Drop the 3-character label prefix before the school name.
            l['school'] = response.xpath(
                '//li[@class="school"]/span/text()').extract()[0][3:]
        except IndexError:
            l['school'] = ''
        if not is_pku(l['school']):
            # Not a PKU student: forget this URL and stop here.
            self.queue.pop(response.url)
            return

        l['name'] = response.xpath(
            '//h1[@class="avatar_title"]/text()').extract()[0]
        l['numberOfFriends'] = response.xpath(
            '//div[@class="has-friend"]/h4/span/text()').extract()[0]
        # The view counter is embedded in free text; keep the first digit run.
        l['numberOfViews'] = re.findall(
            '^[^0-9]*([0-9]*).*$',
            response.xpath('//h1[@class="avatar_title"]/span/text()').extract()[0],
            re.S)[0]
        try:
            l['gender'] = response.xpath(
                '//li[@class="birthday"]/span[1]/text()').extract()[0]
        except IndexError:
            l['gender'] = ''
        l['url'] = response.url
        # Profile URLs look like http://www.renren.com/<ID>/profile
        l['ID'] = response.url.split('/')[-2]
        l['image_urls'] = response.xpath('//img[@id="userpic"]/@src').extract()

        def gen_l(item):
            """Build the detail-page callback that finishes ``item``.

            The returned callback fills the remaining fields from the
            "edit info" page, pops the URL from the persistent queue,
            and yields the completed item.
            """

            def get_field(response, xp):
                return response.xpath(xp).extract()

            def get_info(response):
                item['department'] = get_field(response, '//div[@id="educationInfo"]/div[@class="info-section-info"]/dl[1]/dd[1]/a[last()]/text()')
                item['birthyear'] = get_field(response, '//div[@id="basicInfo"]/div[@class="info-section-info"]/dl[2]/dd[1]/a[1]/text()')
                item['birthmonth'] = get_field(response, '//div[@id="basicInfo"]/div[@class="info-section-info"]/dl[2]/dd[1]/a[2]/text()')
                item['birthday'] = get_field(response, '//div[@id="basicInfo"]/div[@class="info-section-info"]/dl[2]/dd[1]/a[3]/text()')
                item['constellation'] = get_field(response, '//div[@id="basicInfo"]/div[@class="info-section-info"]/dl[2]/dd[1]/a[4]/text()')
                item['hometown'] = get_field(response, '//div[@id="basicInfo"]/div[@class="info-section-info"]/dl[3]/dd[1]/text()')

            def parse_l(response):
                print(item['url'])
                get_info(response)
                # Fully scraped: remove the URL from the persistent queue.
                self.queue.pop(item['url'])
                yield item

            return parse_l

        parse_l = gen_l(l)

        # Breadth: enqueue every profile link we have not seen before.
        for url in response.xpath('//a/@href').extract():
            if is_matching_main_page(url):
                if self.queue.push(url) == True:
                    yield scrapy.Request(url, callback=self.parse)
            elif is_matching_recent_view(url):
                # Recent-visitor links carry the user id as a query param;
                # rewrite them into canonical profile URLs.
                url = 'http://www.renren.com/' + url.split('=')[-1] + '/profile'
                if self.queue.push(url) == True:
                    yield scrapy.Request(url, callback=self.parse)

        # Depth: fetch the "edit info" page to finish this item. The
        # scheduler is LIFO, so this request is served before the ones above.
        info_url = response.xpath(
            '//div[@class="info-bottom clearfix"]/a[@class="editinfo"]/@href'
        ).extract()[0]
        yield scrapy.Request(info_url, callback=parse_l)
