from collections import deque

from TwitterParser import TwitterRelationExtractor
from TwitterParser import TwitterURLConverter

# Number of seeds crawled between successive on-disk checkpoints
# (see TwitterGraphCrawler.run / storeCheckPoint).
checkPointFreq = 1000;

class TwitterGraphCrawler:
    """Breadth-first crawler over the Twitter friend/follower graph.

    Seed user ids are read from ``dataDirPath/inputFileName``; each seed's
    friends and followers are fetched and queued, and crawl state is
    periodically checkpointed to ``crawled.dat`` / ``tocrawl.dat`` in
    ``dataDirPath``.
    """

    def __init__(self, dataDirPath, inputFileName, outputFileName):
        self.dataDirPath = dataDirPath
        self.inputFileName = inputFileName
        # NOTE(review): outputFileName is stored but never used in this file;
        # presumably consumed elsewhere — confirm before removing.
        self.outputFileName = outputFileName

    def run(self):
        """Crawl outward from the initial seeds until the queue is empty,
        checkpointing every ``checkPointFreq`` crawled seeds."""
        cnt = 0
        # deque gives O(1) pops from the left; list.pop(0) is O(n) per pop.
        seeds = deque(self.loadSeed(self.dataDirPath + '/' + self.inputFileName))
        # Fixed: the counter previously started at 0 (not len(seeds)) and so
        # went negative / misreported the queue size.
        totalSeedCnt = len(seeds)
        crawled = set()
        # Fixed: explicit emptiness test replaces a bare `except:` around pop.
        while seeds:
            seed = seeds.popleft()
            totalSeedCnt -= 1
            newSeeds = self.crawlAbout(seed)
            crawled.add(seed)
            seedCnt = 0
            for newSeed in newSeeds:
                if newSeed in crawled:
                    continue
                seeds.append(newSeed)
                seedCnt += 1
                totalSeedCnt += 1
            # Single-string form so the statement behaves the same under
            # Python 2 (print statement) and Python 3 (print function).
            print('adding %d; now there are %d' % (seedCnt, totalSeedCnt))
            cnt += 1
            if cnt % checkPointFreq == 0:
                self.storeCheckPoint(crawled, seeds)

    # load all seeds from a file
    def loadSeed(self, filePath):
        """Return the list of seed ids in ``filePath``, one per line,
        whitespace-stripped.  Fixed: the file handle is now closed."""
        with open(filePath) as seedFile:
            return [line.strip() for line in seedFile]

    # crawl related seeds from a seed
    def crawlAbout(self, id):
        """Return the set of ids related to ``id``: its friends union its
        followers, as extracted by TwitterRelationExtractor."""
        converter = TwitterURLConverter()
        extractor = TwitterRelationExtractor()
        ids = extractor.extractIds(converter.getFriendXMLURL(id))
        ids = ids.union(extractor.extractIds(converter.getFollowerXMLURL(id)))
        return ids

    # store a check point
    def storeCheckPoint(self, crawled, toCrawl):
        """Persist crawl state: already-crawled ids to ``crawled.dat`` and
        the pending queue to ``tocrawl.dat``, one id per line.

        Fixed: the original assigned the *path strings* to the writer
        variables and called ``.write()``/``.close()`` on them, which raised
        AttributeError and never wrote a checkpoint.  Returns 0 as before.
        """
        with open(self.dataDirPath + '/crawled.dat', 'w') as crawledWriter:
            for crawledSeed in crawled:
                crawledWriter.write(crawledSeed + '\n')

        with open(self.dataDirPath + '/tocrawl.dat', 'w') as toCrawlWriter:
            for toCrawlSeed in toCrawl:
                toCrawlWriter.write(toCrawlSeed + '\n')
        return 0
        

if __name__ == '__main__':
    import sys
    # Fixed: a missing command-line argument previously crashed with a bare
    # IndexError; fail with a usage message instead.
    if len(sys.argv) < 2:
        sys.exit('usage: %s <dataDirPath>' % sys.argv[0])
    inputFileName = 'input.seeds'
    outputFileName = 'output.seeds'
    dataDirPath = sys.argv[1]
    crawler = TwitterGraphCrawler(dataDirPath, inputFileName, outputFileName)
    crawler.run()
