import blogcrawlerUtil;
import BlogElements;
import re;
import xml.sax;
import os;
import feedparser;
import time;

class TwitterURLConverter:
    """Builds Twitter API/search URLs and maps between user names and seed URLs."""

    def getFriendXMLURL(self, id):
        """Return the friends-ids XML endpoint for user *id*."""
        return 'http://twitter.com/friends/ids.xml?user_id=%s' % id

    def getFollowerXMLURL(self, id):
        """Return the followers-ids XML endpoint for user *id*."""
        return 'http://twitter.com/followers/ids.xml?user_id=%s' % id

    def getSearchURL(self, term):
        """Return the Atom search-feed URL for *term*."""
        return 'http://search.twitter.com/search.atom?q=' + term

    def getName(self, URL):
        """Extract the user name from a twitter.com home-page URL."""
        return URL[len('http://twitter.com/'):]

    def name2Seed(self, name):
        """Inverse of getName: build the home-page URL for *name*."""
        return 'http://twitter.com/' + name

class TwitterNameExtractor(xml.sax.ContentHandler):
    """Collects Twitter user names from the entries of a parsed feed."""

    def __init__(self):
        self.names = set()
        self.pattern = re.compile('http://twitter.com/[a-zA-Z0-9]+')
        self.converter = TwitterURLConverter()

    def extractNames(self, URL):
        """Return the user names found in the feed at *URL*.

        Each entry's 'href' is assumed to be a twitter.com home-page URL
        (TODO confirm against the feeds actually crawled).
        """
        doc = feedparser.parse(URL)
        if not doc.entries:
            return []
        return [self.converter.getName(entry['href']) for entry in doc.entries]


class Tag:
    """Lightweight record of an XML/HTML element (name + attributes),
    used as a stack entry by the SAX handlers in this file."""

    def __init__(self, name, attrs):
        self.name = name    # element name, e.g. 'span'
        self.attrs = attrs  # mapping of attribute name -> value

    def __str__(self):
        # Debug rendition of the start tag, e.g. '<span class=entry-content>'.
        # Fixes the original's stray space after '<' and builds the string
        # with a join instead of repeated concatenation.
        parts = ['<' + self.name]
        for key in self.attrs.keys():
            parts.append(' %s=%s' % (key, self.attrs[key]))
        return ''.join(parts) + '>'

class TwitterHomeParser(xml.sax.ContentHandler):
    """SAX handler that scrapes status postings from a Twitter home page.

    The page is fetched to a temp file by blogcrawlerUtil, then parsed as
    XML.  Postings are collected as BlogElements.BlogPosting objects with
    url, title and date (seconds since the epoch) filled in.
    """

    def __init__(self):
        xml.sax.ContentHandler.__init__(self)
        self.postings = []     # BlogPosting objects collected so far
        self.parents = []      # stack of open Tag elements
        self.url = ''          # home-page URL currently being parsed
        self.currContent = ''  # text of the entry being assembled
        self.currURL = ''      # permalink of the entry being assembled
        self.currDate = ''     # timestamp of the entry being assembled
        # Twitter's absolute-date layout after the year is appended,
        # e.g. '10:25 PM Jun 23 2009'.
        self.format = '%I:%M %p %b %d %Y'

    def parse(self, url):
        """Crawl *url* and return the list of BlogPosting objects found."""
        self.url = url
        self.postings = []
        filePath = 'temp.html'
        blogcrawlerUtil.crawlPage(filePath, url)
        try:
            xml.sax.parse(filePath, self)
        except Exception:
            # Real-world pages are often not well-formed XML; return
            # whatever was collected before the parser gave up.
            print('parsing error!')
        return self.postings

    def startElement(self, name, attrs):
        self.parents.append(Tag(name, attrs))
        # An <a href="<home>/status/..."> link is the permalink of a posting.
        if name == 'a' and 'href' in attrs:
            href = attrs['href']
            if href.startswith(self.url + '/status'):
                self.currURL = href

    def endElement(self, name):
        # SAX guarantees balanced start/end events, so the stack is non-empty.
        self.parents.pop()

    def characters(self, content):
        if not self.parents:
            return  # text outside any element; nothing to classify
        parent = self.parents[-1]
        is_classed_span = parent.name == 'span' and 'class' in parent.attrs
        if is_classed_span and parent.attrs['class'] == 'entry-content':
            # The tweet text itself.
            self.currContent = content
        if is_classed_span and parent.attrs['class'] == 'published':
            if content.find('hours ago') >= 0 or content.find('minutes ago') >= 0:
                # Relative timestamp: subtract the offset from "now".
                unit = ''
                amount = 0
                tokens = content.split()
                # Scan right-to-left for the unit word; stop at index 1 so
                # tokens[i - 1] (the count) is always a real preceding token.
                for i in range(len(tokens) - 1, 0, -1):
                    if tokens[i] in ('hours', 'minutes'):
                        unit = tokens[i]
                        amount = int(tokens[i - 1])
                        break
                now = time.time()
                if unit == 'hours':
                    self.currDate = now - amount * 3600
                elif unit == 'minutes':
                    self.currDate = now - amount * 60
                else:
                    return
            else:
                # Absolute timestamp, e.g. '10:25 PM Jun 23rd': drop the
                # two-char ordinal suffix and append a year for strptime.
                # NOTE(review): the year is hard-coded to 2009 -- presumably
                # year-less dates are only shown for the current year;
                # confirm before reusing this scraper.
                content = content[:-2] + ' 2009'
                try:
                    parsed = time.strptime(content, self.format)
                    self.currDate = time.mktime(parsed)
                except (ValueError, OverflowError):
                    return
            posting = BlogElements.BlogPosting()
            posting.url = self.currURL
            posting.title = self.currContent
            posting.date = self.currDate
            self.postings.append(posting)
            
        
if __name__ == '__main__':
    # Quick manual check: crawl one user's home page and report how many
    # postings were extracted.
    seed_url = 'http://twitter.com/dongdongqiang'
    home_parser = TwitterHomeParser()
    results = home_parser.parse(seed_url)
    print(len(results))


class TwitterSeedParser(xml.sax.ContentHandler):
    """Finds a user's RSS timeline link in a Twitter page, then reads the feed.

    Used as its own SAX handler: parsing the crawled page sets self.rss to
    the absolute URL of the per-user timeline RSS feed.
    """

    def __init__(self):
        xml.sax.ContentHandler.__init__(self)
        # Path of the per-user timeline RSS link, e.g.
        # /statuses/user_timeline/12345.rss.  The dot before 'rss' is now
        # escaped -- the original pattern let it match any character.
        self.pattern = re.compile(r'/statuses/user_timeline/[0-9a-zA-Z]+\.rss')

    def startElement(self, name, attrs):
        # NOTE(review): this method was defined twice in the original class;
        # the earlier copy (which also printed every href) was dead code
        # because the later definition silently overrode it.  One kept here.
        if name == 'a' and 'href' in attrs:
            href = attrs['href']
            if self.pattern.match(href):
                self.rss = 'http://twitter.com' + href

    def parse(self, url):
        """Return a list of BlogPosting objects from *url*'s RSS feed.

        Returns 0 (not a list) when the page cannot be crawled/parsed or no
        RSS link is found -- kept for backward compatibility with callers.
        """
        try:
            rss = self.getRss(url)
        except Exception:
            # Covers crawl failures, malformed XML, and a missing self.rss.
            print('parsing %s failed' % url)
            return 0
        doc = feedparser.parse(rss)
        postings = []
        for entry in doc.entries:
            posting = BlogElements.BlogPosting()
            posting.title = entry['title']
            posting.date = time.mktime(entry['updated_parsed'])
            posting.url = entry['link']
            postings.append(posting)
        print('getting %d postings from %s' % (len(postings), url))
        return postings

    def getRss(self, url):
        """Crawl *url*, parse it with self as handler, return the RSS link.

        Raises AttributeError if no matching link was seen (self.rss unset).
        """
        filePath = 'temp.html'
        blogcrawlerUtil.crawlPage(filePath, url)
        xml.sax.parse(filePath, self)
        return self.rss
 
