#!/usr/bin/python
#coding=utf8
#
# Copyright (C) 2011 XNData contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


__author__ = "zhangchitc@gmail.com"


import urllib2, urllib
import re
from BeautifulSoup import BeautifulSoup          # For processing HTML
from BeautifulSoup import BeautifulStoneSoup     # For processing XML
import xndata.client
import xndata.data.info
import json

class InfoUrlMgr:
    """Builds the renren.com URLs used by the info fetchers below."""

    @staticmethod
    def friend_list_url (user_id, cur_page):
        """URL for page *cur_page* of *user_id*'s friend list."""
        base = "http://friend.renren.com/GetFriendList.do"
        return "{0}?curpage={1}&id={2}".format (base, cur_page, user_id)

    @staticmethod
    def profile_url (user_id):
        """URL of the ajax fragment holding *user_id*'s basic profile."""
        return ("http://www.renren.com/profile.do"
                "?id={0}&v=info_ajax&undefined".format (user_id))

    @staticmethod
    def contact_url (user_id):
        """URL of *user_id*'s contact card (returns JSON)."""
        return "http://www.renren.com/showcard?friendID={0}".format (user_id)

class InfoPageFetcher:
    """Downloads the raw info pages through an authenticated urllib2 opener."""

    @staticmethod
    def _fetch (opener, url):
        # Shared download helper: GET *url* and return the raw response body.
        return opener.open (urllib2.Request (url)).read ()

    @staticmethod
    def fetch_friend_list_page (opener, user_id, cur_page):
        """Fetch page *cur_page* of *user_id*'s friend list (HTML)."""
        return InfoPageFetcher._fetch (
                opener, InfoUrlMgr.friend_list_url (user_id, cur_page))

    @staticmethod
    def fetch_profile_page (opener, user_id):
        """Fetch *user_id*'s profile info fragment (HTML)."""
        return InfoPageFetcher._fetch (opener, InfoUrlMgr.profile_url (user_id))

    @staticmethod
    def fetch_contact_page (opener, user_id):
        """Fetch *user_id*'s contact card (JSON text)."""
        return InfoPageFetcher._fetch (opener, InfoUrlMgr.contact_url (user_id))


def nvgstr2str (s):
    """Convert a BeautifulSoup NavigableString to a UTF-8 byte string.

    str() raises UnicodeEncodeError on non-ASCII text (Chinese labels),
    so the value is first promoted to unicode and then encoded.
    """
    text = unicode (s)
    return text.encode ('utf-8')


class InfoPageParser:
    """Parses the pages fetched by InfoPageFetcher into xndata.data.info
    objects (friend lists, profiles) and plain dicts (contact info)."""

    @staticmethod
    def get_total_friend (html):
        """Return the total friend count shown on a friend-list page.

        The count sits in <div id="toc"><p><span>N</span>.
        """
        soup = BeautifulSoup(html)
        node = soup.find("div", { "id" : "toc" })
        return int (node.p.span.string)

    @staticmethod
    def get_friend_list (html, user_id, cur_page):
        """Parse one page of *user_id*'s friend list.

        Returns an xndata.data.info.FriendList containing a SimpleProfile
        for every <li> under <ol id="friendListCon">.
        """
        soup = BeautifulSoup (html)
        allfriend = soup.find ("ol", { "id" : "friendListCon" }).findAll (
                        name="li", recursive=False)
        friend_list = xndata.data.info.FriendList (
                        user_id = user_id, cur_page = cur_page)

        # Compile once: the pattern is invariant across the loop.
        id_pattern = re.compile (r"id=(?P<id>\d*)")
        for friend_node in allfriend:
            # The friend's own id is embedded in the profile link's href.
            href = friend_node.p.a["href"]
            m = id_pattern.search (href)
            # Fresh name so the user_id parameter is not shadowed.
            friend_id = str (m.group ("id"))
            tiny_url = str (friend_node.p.a.img["src"])
            name = str (friend_node.div.dl.dd.a.string)

            # The second <dt>/<dd> pair carries either the city or the
            # school, labelled in Chinese ("city" / "school" below).
            info_type = str (friend_node.findAll ("dt")[1].string).strip ()
            info = str (friend_node.findAll ("dd")[1].string).strip ()

            sprofile = xndata.data.info.SimpleProfile (
                    user_id = friend_id, name = name, tinyphoto_url = tiny_url)
            if info_type == "城市":
                sprofile.city = info
            if info_type == "学校":
                sprofile.school = info

            friend_list.addFriend (sprofile)

        return friend_list

    @staticmethod
    def get_profile (html, user_id):
        """Parse the ajax profile fragment for *user_id* into a Profile.

        The fragment pairs <dt> labels (in Chinese: sex, birthday,
        hometown, college, high/junior/primary school) with <dd> values.
        """
        # Strip embedded <script>...</script> blocks before parsing.
        while html.find ("<script") != -1:
            a = html.find ("<script")
            b = html.find ("/script>")
            if b == -1:
                # BUG FIX: an unterminated <script made b == -1, so the
                # old code spliced html[7:] back in and could loop forever.
                # Drop the rest of the page instead.
                html = html[:a]
                break
            html = html[:a] + html[b + 8:]
        # Wrap the fragment so BeautifulSoup sees a complete document.
        html = "<html><head><title>xx</title></head><body>" + html + "</body></html>"
        soup = BeautifulSoup (html)

        profile = xndata.data.info.Profile ()
        profile.set_uid (user_id)

        dts = soup.findAll (name="dt")
        dds = soup.findAll (name="dd")
        for dt, dd in zip (dts, dds):
            if dt.string == u"性别 :":
                # dd.string is a BeautifulSoup NavigableString holding
                # Chinese text, so str() would fail; go through nvgstr2str.
                profile.set_sex (nvgstr2str (dd.string))

            if dt.string == u"生日 :" and len (dd.findAll ("a")) == 4:
                # Four links: year, month, day and the zodiac sign.
                anchors = dd.findAll ("a")
                year = int (anchors[0].string)
                month = int (anchors[1].string)
                day = int (anchors[2].string)
                profile.set_birth (year, month, day)

            if dt.string == u"家乡 :":
                # Hometown: optional province link, then optional city link,
                # both in Chinese.
                anchors = dd.findAll ("a")
                if len (anchors) > 0:
                    profile.set_hometown_p (nvgstr2str (anchors[0].string))
                if len (anchors) > 1:
                    profile.set_hometown_c (nvgstr2str (anchors[1].string))

            if dt.string == u"大学 :":
                # College: name, then graduation year ("2010年" style, so
                # the trailing character is sliced off), then department.
                anchors = dd.findAll ("a")
                if len (anchors) > 0:
                    profile.set_college (nvgstr2str (anchors[0].string))
                if len (anchors) > 1:
                    profile.set_college_grad (int (anchors[1].string[:-1]))
                if len (anchors) > 2:
                    profile.set_college_dept (nvgstr2str (anchors[2].string))

            if dt.string == u"高中 :":
                profile.set_highschool (nvgstr2str (dd.a.string))

            if dt.string == u"初中 :":
                profile.set_juniorschool (nvgstr2str (dd.a.string))

            if dt.string == u"小学 :":
                profile.set_primaryschool (nvgstr2str (dd.a.string))

        return profile

    @staticmethod
    def get_contact_info (html):
        """Parse the JSON contact card into a dict with keys "name",
        "qq", "email" and "phone".

        The site reports unset fields as u"-1" (or an empty string);
        those are normalised to "".  The msn field doubles as an email
        fallback when the dedicated email field is unset.
        """
        ret = {}
        contact = json.loads (html)
        # The name can contain non-ASCII text; lift the raw value out of
        # the JSON source with a regex instead of re-encoding it.
        m = re.compile(r'"name":"(?P<name>.*?)"').search (html)
        ret["name"] = m.group ("name")

        if contact[u"qq"] != u"-1" and contact[u"qq"] != u"":
            ret["qq"] = str (contact[u"qq"])
        else:
            ret["qq"] = ""

        # msn first: it only survives if no dedicated email is set below.
        if contact[u"msn"] != u"-1" and contact[u"msn"] != u"":
            ret["email"] = str (contact[u"msn"])
        else:
            ret["email"] = ""

        # BUG FIX: the original had an else branch here that reset
        # ret["email"] to "" whenever the email field was unset, which
        # unconditionally discarded the msn fallback captured above.
        if contact[u"email"] != u"-1" and contact[u"email"] != u"":
            ret["email"] = str (contact[u"email"])

        if contact[u"phone"] != u"-1" and contact[u"phone"] != u"":
            ret["phone"] = str (contact[u"phone"])
        else:
            ret["phone"] = ""

        return ret



if __name__ == "__main__":
    # Manual smoke test: log in, fetch one user's contact card, parse it.
    # (Removed a large body of commented-out calls to PhotoPageFetcher /
    # PhotoPageParser, which are not defined in this module.)
    #
    # NOTE(review): real account credentials are hard-coded below; they
    # should come from a config file or environment variables.
    client = xndata.client.XNClient ("zhangchitc@gmail.com", "aaaaaaaa")
    html = InfoPageFetcher.fetch_contact_page (client.opener, "247780420")
    InfoPageParser.get_contact_info (html)

