import os;
from blogcrawlerUtil import crawlPage
import TimeFormat;
import BlogElements;

# Default seed-list filename (unused in this module — NOTE(review): possibly consumed by a sibling module; confirm before removing)
seedPath = 'seed.xml';

class SeedBasedCrawler:
    """Continuously crawls the seeds listed in a seed file, appending newly
    seen postings to a posting file.

    Per-seed last-update timestamps are kept in ``self.update`` and
    checkpointed to disk (curr.update.dat during a pass, promoted to
    prev.update.dat when the pass completes) so that a restart resumes
    after the last completed seed instead of re-crawling everything.
    """

    def __init__(self, dataDirPath, urlConverter, seedParser):
        self.dataDirPath = dataDirPath
        self.urlConverter = urlConverter  # maps a seed name to its seed URL (name2Seed)
        self.seedParser = seedParser      # fetches a seed URL and returns postings (parse)
        self.seedFilePath = self.dataDirPath + '/sorted.seeds'
        self.currUpdatePath = self.dataDirPath + '/curr.update.dat'
        self.prevUpdatePath = self.dataDirPath + '/prev.update.dat'
        self.postingFilePath = self.dataDirPath + '/posting.dat'
        # Name of the last seed checkpointed in an interrupted pass;
        # 0 (falsy) means "no resume point".
        self.currName = 0
        # seed name -> last-update timestamp (values stored as strings)
        self.update = {}

    def recover(self):
        """Rebuild ``self.update`` from the previous- and current-update files.

        Remembers the last seed recorded in the (possibly interrupted)
        current pass so crawl() can resume after it, and persists the merged
        state back to the previous-update file.
        """
        self.recoverUpdate(self.prevUpdatePath)
        self.currName = self.recoverUpdate(self.currUpdatePath)
        self.restoreUpdate(self.prevUpdatePath)

    def recoverUpdate(self, path):
        """Merge '<seedName> <timestamp>' lines from *path* into self.update.

        Blank or malformed lines (fewer than two tokens) are skipped.
        Returns the last seed name read, or 0 if the file is absent/empty.
        """
        currname = 0
        if os.path.exists(path):
            updateFile = open(path)
            try:
                for line in updateFile:
                    tokens = line.split()
                    if len(tokens) < 2:
                        continue  # tolerate blank/truncated checkpoint lines
                    self.update[tokens[0]] = tokens[1]
                    currname = tokens[0]
            finally:
                updateFile.close()
        return currname

    def restoreUpdate(self, path):
        """Write every 'seedName timestamp' pair in self.update to *path*."""
        updateFile = open(path, 'w')
        try:
            for key in self.update:
                updateFile.write(key + ' ' + str(self.update[key]) + '\n')
        finally:
            updateFile.close()

    def crawl(self):
        """Crawl forever.

        Each pass walks the seed file; for every seed it fetches the seed
        URL, appends postings newer than the seed's recorded last-update
        timestamp to the posting file, and checkpoints progress to
        curr.update.dat. A completed pass's checkpoint replaces
        prev.update.dat.
        """
        self.recover()  # was called twice in the original — once is enough
        postingFile = open(self.postingFilePath, 'a')
        totalCount = 0

        while True:
            seedsFile = open(self.seedFilePath)
            currUpdateWriter = open(self.currUpdatePath, 'w')
            try:
                # for each seed url
                for line in seedsFile:
                    seedName = line.strip()
                    # Resume: skip seeds up to and including the last one
                    # checkpointed by the interrupted pass.
                    if self.currName:
                        print(self.currName)
                        if seedName == self.currName:
                            self.currName = 0
                        continue
                    seedUrl = self.urlConverter.name2Seed(seedName)
                    print(seedUrl)
                    postings = self.seedParser.parse(seedUrl)
                    newCount = 0
                    if postings:
                        print(len(postings))
                        lastUpdateDate = 0
                        if seedName in self.update:
                            lastUpdateDate = self.update[seedName]
                        maxDate = lastUpdateDate
                        for posting in postings:
                            currDate = posting.date
                            # Only postings strictly newer than the recorded
                            # timestamp are written out.
                            if currDate > lastUpdateDate:
                                postingFile.write(str(posting) + '\n')
                                postingFile.flush()
                                newCount += 1
                                totalCount += 1
                            if currDate > maxDate:
                                maxDate = currDate
                        self.update[seedName] = str(maxDate)
                        currUpdateWriter.write(seedName + ' ' + str(maxDate) + '\n')
                        currUpdateWriter.flush()
                        count = len(postings)
                        if count:
                            print('adding ' + str(newCount) + ' of ' + str(count)
                                  + ' postings from ' + seedUrl + ' '
                                  + str(totalCount) + ' postings now!')
            finally:
                seedsFile.close()
                currUpdateWriter.close()
            # Promote the completed pass's checkpoint; os.rename replaces the
            # shell-string `os.system('mv ...')` (no shell, same-dir move).
            os.rename(self.currUpdatePath, self.prevUpdatePath)
            self.currName = 0

if __name__ == '__main__':
    import sys
    import TwitterParser

    # Guard against a missing argument instead of dying with an IndexError.
    if len(sys.argv) < 2:
        sys.stderr.write('usage: %s <dataDirPath>\n' % sys.argv[0])
        sys.exit(1)

    dataDirPath = sys.argv[1]
    urlConverter = TwitterParser.TwitterURLConverter()
    seedParser = TwitterParser.TwitterHomeParser()
    crawler = SeedBasedCrawler(dataDirPath, urlConverter, seedParser)
    crawler.crawl()
