# -*- coding: utf-8 -*-
import sys 
reload(sys) 
sys.setdefaultencoding("utf-8") 
import re
import scrapy
from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from mainPage.utils import *
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.http import Request, FormRequest

from scrapy.contrib.loader import ItemLoader

def fst(m):
    """Return the text captured by the first group of regex match *m*."""
    first_group = m.group(1)
    return first_group

def log_page(response, filename):
    """Dump a response's URL, headers and body to *filename* for debugging."""
    dump = "%s\n%s\n%s\n" % (response.url, response.headers, response.body)
    with open(filename, 'w') as log_file:
        log_file.write(dump)

class renrenSpider(scrapy.Spider):
    """Spider that logs in to renren.com and harvests profile IDs from
    public-page fan lists.

    Flow: start_requests -> login page -> post_login (submits the login
    form) -> after_login (schedules the fan-list start_urls) -> parse
    (extracts profile links and appends new ones to data.txt).
    """

    name = "mainPage"
    allowed_domains = ["www.renren.com"]
    # Profile ids already written to data.txt. Kept as a class-level list
    # (shared across instances) to preserve the original interface.
    Ids = []

    # Matches a bare profile URL; compiled once instead of on every anchor.
    _PROFILE_RE = re.compile(r'http://www\.renren\.com/profile\.do\?id=[\d]+$')

    start_urls = [
        "http://page.renren.com/601805980/channel-fanslist",
        "http://page.renren.com/601541411/channel-fanslist",
        "http://page.renren.com/601541411/channel-fanslist?curpage=1",
        "http://page.renren.com/fanslist?pid=601561773",
        "http://page.renren.com/fanslist?curpage=1&pid=601561773",
        "http://page.renren.com/fanslist?pid=601599276",
        "http://page.renren.com/fanslist?curpage=1&pid=601599276",
        "http://page.renren.com/fanslist?curpage=2&pid=601599276",
        "http://page.renren.com/fanslist?pid=601463845",
        "http://page.renren.com/fanslist?curpage=1&pid=601463845",
        "http://page.renren.com/fanslist?curpage=2&pid=601463845",
        "http://page.renren.com/fanslist?curpage=3&pid=601463845",
    ]

    def start_requests(self):
        """Begin with the login page; crawling proper starts after login."""
        return [Request("http://www.renren.com/SysHome.do",
                        callback=self.post_login)]

    def post_login(self, response):
        """Fill in and submit the login form found on the login page."""
        log_page(response, "renren_login.html")
        # NOTE(review): credentials are hard-coded in source; they should
        # be moved to spider settings or environment variables.
        return [FormRequest.from_response(
            response,
            formdata={
                'email': 'pkunetworklab1@163.com',
                'password': 'pku123456',
            },
            callback=self.after_login,
        )]

    def after_login(self, response):
        """Once authenticated, schedule every fan-list page for parsing."""
        for url in self.start_urls:
            # dont_filter=True mirrors make_requests_from_url(), which the
            # original called and which is deprecated in modern Scrapy.
            yield Request(url, dont_filter=True, callback=self.parse)

    def parse(self, response):
        """Extract profile URLs from anchors, de-duplicate by numeric id,
        and append each newly seen URL to data.txt."""
        for url in response.xpath('//a/@href').extract():
            if not self._PROFILE_RE.match(url):
                continue
            profile_id = url.split("=")[-1]
            if profile_id in self.Ids:
                continue
            self.Ids.append(profile_id)
            with open('data.txt', 'a') as out:
                out.write(url + '\n')

        

