import os;
from TwitterParser import TwitterNameExtractor;
from TwitterParser import TwitterURLConverter;

# Number of processed tokens between checkpoint writes (see run()).
checkPointFreq = 1000

class TwitterSeedCollector:
    """Expand a set of seed Twitter names by searching each input token.

    Reads whitespace-separated tokens from ``dataDirPath/inputFileName``,
    searches each previously-unseen token through the TwitterParser helpers,
    accumulates the returned names, periodically checkpoints progress to
    ``crawled.dat`` / ``crawledToken.dat`` in ``dataDirPath``, and finally
    writes every collected name to ``dataDirPath/outputFileName``.
    """

    def __init__(self, dataDirPath, inputFileName, outputFileName):
        self.dataDirPath = dataDirPath
        self.inputFileName = inputFileName
        self.outputFileName = outputFileName
        self.converter = TwitterURLConverter()
        self.extractor = TwitterNameExtractor()
        self.crawledTokens = set()  # tokens already searched
        self.crawled = set()        # seed names already collected

    def recover(self):
        """Reload crawl state from the checkpoint files, if they exist."""
        print('recovering......')
        crawledFilePath = self.dataDirPath + '/crawled.dat'
        if os.path.exists(crawledFilePath):
            # 'with' guarantees the file is closed even if reading fails.
            with open(crawledFilePath) as crawledFile:
                self.crawled = set(line.strip() for line in crawledFile)

        crawledTokenFilePath = self.dataDirPath + '/crawledToken.dat'
        if os.path.exists(crawledTokenFilePath):
            with open(crawledTokenFilePath) as crawledTokenFile:
                self.crawledTokens = set(line.strip() for line in crawledTokenFile)
        print('recovered!')

    def run(self):
        """Process the input file token by token, collecting seed names.

        Resumes from the last checkpoint, skips tokens/seeds already seen,
        checkpoints every ``checkPointFreq`` processed tokens, and writes the
        final seed list to the output file.
        """
        self.recover()
        cnt = 0
        seedNum = len(self.crawled)
        # BUG FIX: the original read the bare module globals inputFileName /
        # outputFileName / crawled here ('crawled' was never defined at module
        # level, so the final write raised NameError). Use instance attributes.
        with open(self.dataDirPath + '/' + self.inputFileName) as inFile:
            for line in inFile:
                tokens = line.strip().split()
                if not tokens:
                    continue
                for token in tokens:
                    if token in self.crawledTokens:
                        continue
                    self.crawledTokens.add(token)
                    seeds = self.searchNames(token)
                    if not seeds:
                        continue
                    for seed in seeds:
                        if seed in self.crawled:
                            continue
                        self.crawled.add(seed)
                        seedNum += 1
                    cnt += 1
                    if cnt % 2 == 0:
                        print(cnt, ':', seedNum)
                    if cnt % checkPointFreq == 0:
                        self.storeCheckPoint(self.crawled, self.crawledTokens)
        with open(self.dataDirPath + '/' + self.outputFileName, 'w') as outFile:
            for name in self.crawled:
                outFile.write(name + '\n')

    # crawl related seeds from a seed
    def searchNames(self, token):
        """Return the names extracted from the search page for *token*."""
        url = self.converter.getSearchURL(token)
        return self.extractor.extractNames(url)

    # store a check point
    def storeCheckPoint(self, crawled, tokens):
        """Persist crawl state (one entry per line) to the checkpoint files.

        Returns 0 for backward compatibility with the original signature.
        """
        with open(self.dataDirPath + '/crawled.dat', 'w') as crawledWriter:
            for crawledSeed in crawled:
                crawledWriter.write(crawledSeed + '\n')

        with open(self.dataDirPath + '/crawledToken.dat', 'w') as crawledTokenWriter:
            for token in tokens:
                crawledTokenWriter.write(token + '\n')
        return 0
    
if __name__ == '__main__':
    import sys;
    # NOTE(review): run() references the bare names inputFileName /
    # outputFileName, so these module-level assignments are load-bearing for
    # the current code — keep the names unchanged.
    inputFileName = 'input.terms';
    outputFileName = 'output.seeds';
    # First CLI argument: directory holding the input, checkpoint, and
    # output files (no fallback — missing argv[1] raises IndexError).
    dataDirPath = sys.argv[1];
    collector = TwitterSeedCollector(dataDirPath, inputFileName, outputFileName);
    collector.run();
