#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import with_statement

from BeautifulSoup import BeautifulSoup
from datetime import datetime, date
import threading, re, urlparse, urllib
from cStringIO import StringIO
import traceback, string, time
from threading import Semaphore, Thread
try:
    import cPickle as pickle
except:
    import pickle

#from local
from lib.utils import Logger, DummyLogger, transcode5, timeago
from lib.database_msn import Database
from lib.network import clean_url, fetch_url3
from lib.fetch_new_aio import fetch_url

def add_new_user_ids(new_user_ids):
    """Feed freshly discovered user ids into the global friend queue.

    Ids are only enqueued while the seed ``queue`` is nearly drained
    (fewer than 10 pending seeds), so friend-of-friend crawling does not
    starve the seeds fetched from the database.  Access to the shared
    queues is serialized with ``queuelock``.
    """
    if not new_user_ids:
        return
    with queuelock:
        if len(queue) < 10:
            for newid in new_user_ids:
                # BUGFIX: the original tested membership against ``queue``
                # (seed *names*) while appending to ``friendqueue`` (ids),
                # so duplicates were never filtered out.
                if str(newid) not in friendqueue:
                    friendqueue.append(str(newid))
        # else: ids are simply dropped; the original's ``new_user_ids = []``
        # only rebound a local name and had no effect, so it was removed.

def MonthEng2Num(month_eng):
    """Map an English month name ('January'..'December') to 1..12.

    Raises ValueError for any unrecognized name.  (The original fell
    through to ``return error`` — an undefined name — which raised an
    unhelpful NameError at runtime.)
    """
    months = {
        'January': 1, 'February': 2, 'March': 3, 'April': 4,
        'May': 5, 'June': 6, 'July': 7, 'August': 8,
        'September': 9, 'October': 10, 'November': 11, 'December': 12,
    }
    try:
        return months[month_eng]
    except KeyError:
        raise ValueError('unknown English month name: %r' % (month_eng,))

class Bot(threading.Thread):
    """Worker thread that crawls one MSN/Windows Live Spaces user.

    Fetches the user's blog summary page, profile details page, and
    every blog entry (following the "next 20 entries" pagination), then
    stores the results through the shared ``dal`` database object.
    User ids discovered in blog comments are fed back into the global
    crawl queue via ``add_new_user_ids``.
    """
    def __init__(self, name, source, logger, dal):
        # ``name`` is either a space name or a numeric user id, depending
        # on ``source`` ('name' or 'id'); crawl() resolves the other one.
        self.id = name
        self.source = source
        self.logger = logger
        self.name = name
        self.last_time = datetime.now()
        # Running guess of the year/month of the entry currently being
        # parsed; Spaces summary pages omit the year on recent entries,
        # so crawlblogs() infers it and decrements across year wraps.
        self.XXX_year = 2010
        self.XXX_month = 0
        self.new_user_ids = set()  # ids harvested from comment authors
        self.dal = dal
        threading.Thread.__init__(self, name = self.name)

    def __del__(self):
        # Drop the DAL reference explicitly when the bot is collected.
        del self.dal

    def crawl(self):
        """Crawl one user end-to-end.

        Returns True on success, or one of the status strings
        'visited', 'errorid', 'alreadyDoing', 'blank', 'errorprofile'.
        """
        # Skip users the database already knows about ("haveCrawded" is
        # the DAL's status string, typo included — do not "fix" it here).
        if self.source == 'name':
            message = self.dal._checkUserName(self.name)
        else :
            message = self.dal._checkUserId(self.name)

        if message == "haveCrawded":
            self.logger.log('>ALREADY crawled uid ' + self.name, self.name)
            return 'visited'

        elif message == "haveCrawded_none": 
            self.logger.log('>Without blogs last time uid ' + self.name, self.name)
            return 'visited'

        # Resolve the missing identifier: from a numeric id derive the
        # space name (and refetch by name), or from a name derive the id.
        if self.source == 'id':
            url = 'http://cid-' + self.id + '.spaces.live.com/?_c11_BlogPart_BlogPart=summary&_c=BlogPart'
            content = fetch_url3(url, self.logger ,retries = 2)

            Name = re.findall(r"http://(\w+)\.spaces", content)
            if Name:
                self.name = Name[0]
                url = 'http://' + self.name +'.spaces.live.com/?_c11_BlogPart_BlogPart=summary&_c=BlogPart'
                content = fetch_url3(url, self.logger, retries = 2)

        else:
            url = 'http://' + self.name + '.spaces.live.com/?_c11_BlogPart_BlogPart=summary&_c=BlogPart'
            content = fetch_url3(url, self.logger ,retries = 2)

            id = re.findall(r"http://cid-(\w+)\.profile", content)
            if id:
                self.id = id[0]
            else:
                return 'errorid'

        # Register this name in the global in-flight list so two bots
        # never crawl the same user concurrently.
        global bots_list
        #print 'add '+self.name+' into '+str(bots_list)
        if not self.name in bots_list:
            bots_list.append(self.name)
        else:
            return 'alreadyDoing'

        # Redirect page or permission wall means there is nothing to crawl.
        if 'Object moved' in content:
	    self.logger.log('no spaces blog at all' + self.id, self.id)
	    return 'blank'
        if 'You need to have permission to view this space.' in content:
	    self.logger.log('blogs is not available ' + self.id, self.id)
	    return 'blank'

           
        # Profile details page: name, age, location, birthday, etc.
        url_profile = 'http://cid-' + self.id +'.profile.live.com/details/'
        content_profile = fetch_url3(url_profile, self.logger, retries = 2)
        content_profile = transcode5(content_profile, decode_uni =True)
        soup = BeautifulSoup(content_profile, fromEncoding='utf-8')
        ret = self.crawlprofile(soup)
        if ret == 'error':
            return 'errorprofile'

        # Blog summary page (isFirst=True drives the pagination loop).
        content = transcode5(content, decode_uni =True)
        ret = self.crawlblogs(content, True)
        self.dal._finishUser(str(self.id))
        if ret == "noblog":
		self.logger.log('no spaces blog at all' + self.id, self.id)
		return 'blank'

        return True

    def crawlprofile(self, soup):
        """Parse the profile-details page and commit the user record.

        Returns 'error' when the expected profile container is missing;
        otherwise returns None after committing via ``dal._commitUser``.
        """
        # Defaults used when a field is absent from the profile page.
        age = 0
        sex = "ELSE"
        location = "blank"
        familyname = "blank"
        lastname = "blank"
        job = "blank"
        interests = "blank"

        year = "1900"
        month = "1"
        day = "1"
        birthday_str = ''

        profileSoup = soup.find('div', attrs={"id": "sn_dt_left"})

        try:
          nameSoup = profileSoup.find('span', attrs={"dir": "ltr"})
        except:
          # profileSoup is None when the page layout differs — log and bail.
          self.logger.warn(str(profileSoup),self.name)
          return 'error'
        #print nameSoup
        names = re.split(" ",nameSoup.find(text = True))
        lastname = names[0]
        if len(names) > 1:
            familyname = names[1]
        # Flatten every text node; ``info`` keeps the raw profile text
        # joined with '@@' separators for storage.
        infos = profileSoup.findAll(text = True)
        info = ''
        mail =''
        for txt in infos:
            if txt != '\n':
                info = info + ''.join(txt) + ''.join('@@')

        # Scan label/value pairs: a label text node is followed by its
        # value node at infos[i+1].
        # NOTE(review): the ``i = i + 1`` statements below do NOT skip the
        # next item — a for-loop rebinds ``i`` each iteration; they are
        # ineffective (harmless, since value nodes match no label).
        for i in range(len(infos)):
           stri = infos[i]
           if "years old" in stri:
               age = re.search(r'(\d+)', stri).group(0)
           if "Personal IM" in stri:
               mail = infos[i+1]
               i = i + 1
           if "Personal e-mail" in stri:
               mail = infos[i+1]
               i = i + 1
           if "Male" in stri:
               sex = "Male"
           if "Female" in stri:
               # "Fema" is the token stored for females — presumably a DB
               # column-width convention; confirm before changing.
               sex = "Fema"
           if "Location" in stri:
               location = infos[i + 1]
               i = i + 1
           if "Occupation" in stri:
               job = infos[i + 1]
               i = i + 1
           if "Interests" in stri:
               interests =  infos[i + 1]
               i = i + 1
           if "Birthday" in stri:
               birthday_str =  infos[i + 1]
               i = i + 1
               # Birthday formats seen: "1985" / "March 7, 1985" / "March 7"
               birthday_str = re.split(" ", birthday_str)
               if len(birthday_str) == 1:
                   year = birthday_str[0]
               elif len(birthday_str) > 2:
                   month = MonthEng2Num(birthday_str[0])
                   day = birthday_str[1][0:-1]
                   year = birthday_str[2]
               else:
                   month = MonthEng2Num(birthday_str[0])
                   day = birthday_str[1]

        birthday = date(int(year), int(month), int(day))
        #print profileSoup.prettify()
        #print age,sex,location, familyname,lastname,job,interests,birthday
        #print type(lastname)
        #lastname = transcode5(lastname)
        try:
            #print transcode5(location  + " " + self.id)
            self.dal._commitUser(self.id, self.name, transcode5(familyname), transcode5(lastname), sex, age, transcode5(job), transcode5(location), transcode5(interests), birthday, transcode5(info),mail)
        except:
            # Commit failures (e.g. encoding issues) are logged, not raised.
            self.logger.log(lastname+" @@ "+location ,self.name)
        #self.dal._commitUser(self.id, self.name, familyname, lastname, sex, age, job, location, interests, birthday, info)

    def crawlblogs(self,content, isFirst):
        """Parse one blog-summary page and crawl each listed entry.

        ``isFirst`` marks the first page: it triggers year inference for
        date formats lacking a year, and drives the "next 20 entries"
        pagination loop.  Returns "noblog", "error", the next-page URL
        (recursive pages), or None.
        """
        soup = BeautifulSoup(content,fromEncoding='utf-8')

        blogSoup = soup.findAll('table', attrs={"id" : "blogManagement"})
        
        if not blogSoup:
            return "noblog"
        else:
            blogSoup = blogSoup[0]

        typeSoup = blogSoup.findAll('td', attrs={"class": "SP_bmCatMed"})
        linkSoup = blogSoup.findAll('td', attrs={"class": "SP_bmTitleLarge"})
        dateSoup = blogSoup.findAll('td', attrs={"class": "SP_bmDate"})

        if not dateSoup:
            return "noblog"
           # date of the most recent blog entry
        first_date = dateSoup[0].findAll(text = True)
        datelist = first_date[0].split()
        #print first_date[0]

        # Three date layouts: "Month, Year" (comma), "Month Day" (2 words,
        # year missing), or "M/D/Y" (1 word).
        if ',' in first_date[0]:
              pass
                  
        elif len(datelist) == 2:
            if isFirst:
                # Year is missing: probe month-archive pages backwards from
                # the current year until the first entry's date matches.
                first_month = MonthEng2Num(datelist[0])

                now = datetime.now()
                now_month = now.month
                now_year = now.year

                if first_month <= now_month :
                    begin_year = now_year
                else:
                    begin_year = now_year - 1
                 
                #print begin_year

                for year in range(begin_year,2007,-1):
                    url = "'http://"+self.name+".spaces.live.com/?_c11_BlogPart_BlogPart=summary&_c=BlogPart&partqs=amonth%3d"+str(first_month)+"%26ayear%3d"+str(year)+"'"
                    content_forYear = fetch_url3(url, self.logger ,retries = 2)
                    content_forYear = transcode5(content_forYear, decode_uni =True) 

                    soup_forYear = BeautifulSoup(content_forYear, fromEncoding='utf-8')
                    dateSoup_forYear = soup_forYear.findAll('td', attrs={"class": "SP_bmDate"})
                    self.XXX_month = first_month
                    if not dateSoup_forYear:
                        continue
                    firstdate_forYear = dateSoup_forYear[0].findAll(text = True)
                    if first_date[2] == firstdate_forYear[2]:
                        self.XXX_year = year
                        #print 'match '+ str(self.XXX_year)
                        break

        elif len(datelist) == 1:
            pass
        else:
            # Unrecognized date layout — ``error`` is undefined, so this
            # deliberately(?) raises NameError; caller logs via except.
            print error

        # Walk every entry on this page, parse its timestamp per layout,
        # and crawl the detail page if the blog is not yet in the DB.
        for i in range(0,len(linkSoup)):
            try:
              if ',' in first_date[0]:
                month = MonthEng2Num(datelist[0][0:-1])
                day = '1'
                year = datelist[1]
                detailTime = dateSoup[i].findAll(text = True)[2]

              elif len(datelist) == 2:
                time_info = dateSoup[i].findAll(text = True)
                month = MonthEng2Num(time_info[0].split()[0])
                day = time_info[0].split()[1]
                detailTime = time_info[2]

                # Month decreased past January → we crossed a year boundary.
                if self.XXX_month < month:
                    self.XXX_year -= 1
                self.XXX_month = month
                year = str(self.XXX_year)
  
              elif len(datelist) == 1:
                time_info = dateSoup[i].findAll(text = True)
                month = time_info[0].split('/')[0]
                day = time_info[0].split('/')[1]
                year = time_info[0].split('/')[2]
                detailTime = time_info[2]
            except:
                self.logger.warn(str(dateSoup),self.name)
                self.logger.warn(str(i),self.name)
                if time_info:
                    self.logger.warn(str(time_info),self.name)
                return "error"

            #print str(month)+" "+day+" "+year + " " + detailTime

            # Seconds present ("%I:%M:%S %p") vs absent ("%I:%M %p");
            # on a bad year string, retry with the 1904 sentinel year.
            if len(detailTime) > 8:
              try:
                timeq = time.strptime(str(month)+" "+day+" "+year + " " + detailTime, "%m %d %Y %I:%M:%S %p")
              except:
                timeq = time.strptime(str(month)+" "+day+" 1904 " + detailTime, "%m %d %Y %I:%M:%S %p")
              tt = timeq[:-2]
              _time = datetime(*tt)
            else:
              try:
                timeq = time.strptime(str(month)+" "+day+" "+year + " " + detailTime, "%m %d %Y %I:%M %p")
              except:
                timeq = time.strptime(str(month)+" "+day+" 1904 " + detailTime, "%m %d %Y %I:%M %p")
              tt = timeq[:-2]
              _time = datetime(*tt)
            #print _time
            blog_type = typeSoup[i].find(text = True)
            if not blog_type:
               blog_type = 'no type'

            _a = linkSoup[i].find('a')
            url_Detail = _a['href']

            title = linkSoup[i].find(text = True)
            if not title:
               title = 'no title'

            #print str(_time) + " " + url_Detail

            if self.dal._checkBlog(url_Detail) == 'noblog':
                ###fetch_url(url_Detail, [self.crawlDetailPage, url_Detail, blog_type, _time, title])
                content = fetch_url3(url_Detail, self.logger ,retries = 2)
                      
                self.crawlDetailPage(content, url_Detail, blog_type, _time, title)

        # Pagination: "#" marks the last page.
        # NOTE(review): if the anchor is missing, nextSoup is None and the
        # subscript raises TypeError — caught by doit()'s handler.
        nextSoup = soup.find('a', attrs={"title": "View next 20 entries"})
        next_url = nextSoup['href']
        if isFirst:
            while next_url != "#":
                content = fetch_url3("'"+next_url+"'", self.logger ,retries = 2)
                content = transcode5(content, decode_uni =True)
                next_url = self.crawlblogs(content, False)
        else:
            return next_url

    def crawlDetailPage(self, content, url_Detail, blog_type, _time, title):
        """Parse one blog-entry page, commit it, and harvest commenter ids."""
        self.logger.log("start crawl blog " + title + "  " + url_Detail, self.id)
        # Blog id is the digit run after '!' near the end of the URL.
        blogid = re.search("(?<=!)\d+", url_Detail[-15:-1]).group(0)

        content = transcode5(content, decode_uni =True)
        soup = BeautifulSoup(content,fromEncoding='utf-8')

        textSoup = soup.find('div', attrs={"class": "bvMsg"})
        content = str(textSoup)


        #print url_Detail
        #print textSoup
        if textSoup:
            # Plain-text version of the entry for full-text storage.
            plainSoup = textSoup.findAll(text = True)
            plaintext = ''
            for txt in plainSoup:
               if txt :
                   plaintext += ''.join(txt)
            #print plaintext
            num_comment = soup.find('span', attrs={"id": "sn_ccNumCommentControl"})
            if num_comment:
               num_comment = num_comment.find(text = True)
               # Commenter profile links yield new user ids to crawl.
               newids = soup.findAll('a', attrs={"class": "cxp_ic_name"})
               for newidSoup in newids:
                   new_id = re.findall(r"http://cid-(\w+)\.profile", newidSoup['href'])[0]
                   self.new_user_ids.add(new_id)
               add_new_user_ids(self.new_user_ids)
            else :
               num_comment = 0
            #print plaintext
            self.dal._commitBlog(self.id, blogid, title, url_Detail,  _time, num_comment, blog_type, transcode5(content), transcode5(plaintext))


    def doit(self):
        """Run crawl(), map its status to DB updates, and deregister."""
        ret = 'fail'
        try:
            self.logger.log("START crawling " + self.name  + self.source)
            ret = self.crawl()
        except Exception, e:
            # Log the full traceback; ret stays 'fail' and falls through
            # to the final else branch below.
            sio = StringIO()
            traceback.print_exc(file=sio)
            self.logger.warn('Bot throw exception abnormally', self.name)
            self.logger.warn(sio.getvalue(), self.name)
            self.logger.warn(str(e), self.name)
            sio.close()

        if ret == 'alreadyDoing' :
            #print 'alreadyDoing'
            return
        elif ret == 'errorid':
            #print 'errorid'
            return
        elif ret == 'visited':
            #print 'visited'
            return
        elif ret == True:
            #print 'True'
            add_new_user_ids(self.new_user_ids)
        elif ret == 'blank' :
            #print 'blank'
            self.dal._commitUserNoBlog(self.id,self.name)
        elif ret == 'errorprofile':
            #print 'errorprofile'
            self.logger.log('wrong name '+self.name,self.name)
        else:
            #print 'else'
            self.dal._finishUser(str(self.id), status=False)

#        if self.source == 'name':
#            self.dal._finishSeed(self.name)

        global bots_list
        #print 'remove '+self.name +' from '+str(bots_list)
        bots_list.remove(self.name)
        # NOTE(review): clearing the whole list right after removing one
        # name also wipes entries of bots still running — confirm intent.
        bots_list = []


    def run(self):
        """Thread entry point; always releases the global bot semaphore."""
        global bots_semaphore
        try:
          self.doit()
        except:
          # Swallow everything so the semaphore release below always runs.
          pass
        bots_semaphore.release()

# Shared crawler state used by add_new_user_ids(), Bot, and crawl().
bots_semaphore = Semaphore(16)   # caps concurrently running Bot threads
queue = []                       # seed user names fetched from the database
friendqueue = []                 # user ids discovered in blog comments
queuelock = threading.RLock()    # serializes access to the queues above
bots_list = []                   # names of users currently being crawled

def crawl(logpath = None):
    """Main crawl loop: dispatch one Bot thread per queued user.

    Discovered friend ids take priority over database seeds; when both
    queues are empty, the next page of seeds is pulled from the DB, and
    the loop ends once the DB has no more seeds to offer.
    """
    logger = Logger(logpath) if logpath else DummyLogger()
    dal = Database()
    page_num = 0
    while True:
        if friendqueue:
            # Friend ids found while crawling comments come first.
            msnName = str(friendqueue.pop(0))
            source = 'id'
        elif queue:
            msnName = str(queue.pop(0))
            source = 'name'
            dal._finishSeed(msnName)
        else:
            # Both queues drained: refill from the next seed page.
            seeds = dal._getSeeds(page_num)
            if not seeds:
                break
            page_num += 1
            queue.extend(row[0] for row in seeds)
            continue
        bot = Bot(msnName, source, logger, dal)
        # Acquire before start(); each Bot releases in run().
        bots_semaphore.acquire()
        bot.start()

if __name__ == '__main__':
    # Only start crawling when run as a script, not when imported.
    crawl('./crawlmsn.log')
