import os
import sys
import time

import BlogElements
import UrlDb
import blogcrawlerUtil


class BlogPageContentCrawler:    
    def __init__(self, config):
        self.feedPageParser = config.data[config.FEED_PARSER_VARNAME];
        self.profileCrawler = config.data[config.PROFILE_CRAWLER_VARNAME];
        self.commentPageParser = config.data[config.COMMENT_PARSER_VARNAME];
        self.feedExtractor = config.data[config.FEED_EXTRACTOR_VARNAME];
        self.sleepTime = float(config.data[config.CONTENT_SLEEP_TIME_VARNAME]);
        self.pipePath = config.data[config.PIPE_FILE_PATH_VARNAME];
        self.pipeLock = config.data[config.PIPE_LOCK_VARNAME];
        
        postingFilePath = config.data[config.POSTING_FILE_PATH_VARNAME];
        profileFilePath = config.data[config.PROFILE_FILE_PATH_VARNAME];
        logPath = config.data[config.LOG_FILE_PATH_VARNAME];
        crawlMode = config.data[config.CRAWL_MODE_VARNAME];
        
        t = time.time();
        self.tmpPath = '.' + str(t) + '.tmp.file';
        self.commentPath = '.' + str(t) + '.comment.file';
        self.feedPath = '.' + str(t) + '.feed.rss';
        
        postingUrlDbPath = str(t) + '.posting.url.db';
        profileUrlDbPath = str(t) + '.profile.url.db';
        if crawlMode == config.CONTINUE_MODE:
            self.postingUrlDb, self.profileUrlDb = self.recoverUrlDbs(postingUrlDbPath, profileUrlDbPath, postingFilePath, profileFilePath);
        else:
            self.postingUrlDb = UrlDb.UrlDb(postingUrlDbPath);
            self.profileUrlDb = UrlDb.UrlDb(profileUrlDbPath);
        
        if crawlMode == config.CONTINUE_MODE:
            openMode = 'a';
        elif crawlMode == config.RESTART_MODE:
            openMode = 'w';
        self.blogFile = open(postingFilePath, openMode);
        self.profileFile = open(profileFilePath, openMode);
        self.logFile = open(logPath, openMode);
        
    def recoverUrlDbs(self, postingUrlDbPath, profileUrlDbPath, postingFilePath, profileFilePath):
        postingUrlDb = UrlDb.UrlDb(postingUrlDbPath);
        postingUrlDb.loadFromPostingFile(postingFilePath);
        profileUrlDb = UrlDb.UrlDb(profileUrlDbPath);
        profileUrlDb.loadFromProfileFile(profileFilePath);
        return postingUrlDb, profileUrlDb;
         
    '''
        extract feed from a blog home page
    '''
    def extractFeed(self, pageUrl):
        return self.feedExtractor.extract(pageUrl);

    '''
        get content of a rss feed
    '''
    def getFeedContent(self, feedUrl):
        blogs = [];
        if blogcrawlerUtil.crawlPage(self.feedPath, feedUrl) >= 0:
            blogs = self.feedPageParser.parse(self.feedPath);
        return homeUrl, blogs;

    def updateBlogDb(self, homeUrl):
        #1. get blogs
        feedUrl = self.extractFeed(url);
        blogs = self.getFeedContent(feedUrl);
        blogs.sort(BlogElements.blogEntryDateCmp);
        
        #2. check whether homepage is new and update
        if not self.homeUrlDb.contains(homeUrl):
            profile = self.profileCrawler.getProfile(homeUrl);
            if profile:
                profile.url = homeUrl;
                self.profileFile.write(profile.__str__() + '\n');
                self.profileFile.flush();
                self.homeUrlDb[homeUrl] = '';
        else:
            print 'have had url',homeUrl;
        
        #3. check whether blog article is new and update
        totalNum = 0;
        addNum = 0;
        for blog in blogs:
            totalNum += 1;
            if self.postingUrlDb.has_key(str(blog.url)):
                break;
            addNum += 1;
            
            self.logFile.write(time.asctime() + ': adding ' + blog.url + '\n');
            self.entryUrlDb[str(blog.url)] = '';
            if crawlPage(self.tmpPath, blog.commentUrl) == 0:
                self.commentPageParser.parse(self.commentPath);
            text = blog.__str__();
            self.blogFile.write(text + '\n');
        print time.asctime(),': adding',addNum,'entries for blog',feedUrl;
        sys.stdout.flush();
        self.blogFile.flush();
        return (totalNum, addNum);
    
    '''
        deduplicate the duplicated lines
    '''
    def deduplicate(self, lines):
        s = set();
        for line in lines:
            url = line.strip();
            if url and url <> '':
                s.add(url);
        return list(s);
            
    def getUpdateHomeUrls(self):
        self.pipeLock.acquire();
        while(not os.path.exists(self.pipePath)):
            time.sleep(2);
            print 'not',self.pipePath;
        pipeFile = open(self.pipePath);        
        lines = pipeFile.readlines();
        pipeFile.close();
        self.pipeLock.release();
        urls = self.deduplicate(lines);
        os.remove(self.pipePath);
        return urls;
        
    
    def crawl(self):
        while 1:    
            t1 = time.time();
            totalEntryNum = 0;
            newEntryNum = 0;
            urls = self.getUpdateHomeUrls();
            for url in urls:
                updateHomeUrl = url;
                if updateHomeUrl and updateHomeUrl <> '':
                    (num1, num2) = self.updateBlogDb(updateFeedUrl);
                    totalEntryNum += num1;
                    newEntryNum += num2;
            print time.asctime(), ':', 'adding', newEntryNum, 'entries from' , totalEntryNum , '!';
            t2 = time.time();
            interval = t2 - t1;
            
            realSleepTime = self.sleepTime - interval;
            if(realSleepTime <= 0):
                realSleepTime = 0;
            time.sleep(realSleepTime);            
