import os
import sys
import threading

import blogcrawlerUtil
import HomeUrlUpdator
import ProfileCrawler
import StreamCrawler
import StreamParser
# Some configuration options are required while others are optional.
REQUIRED = 'REQUIRED'
OPTIONAL = 'OPTIONAL'

# Supported blog sites (values of the 'blog-site' config variable).
BLOG_SITE_VARNAME = 'blog-site'
LIVESPACE_BLOG = 'livespace'
LIVEJOURNAL_BLOG = 'livejournal'
BLOGSPOT_BLOG = 'blogspot'
SINA_BLOG = 'sina'

# Crawl modes: resume a previous crawl or start over from scratch.
CRAWL_MODE_VARNAME = "crawl-mode"
CONTINUE_MODE = "continue"
RESTART_MODE = "restart"

# Config variable names for the various file and pipe paths.
PROFILE_PIPE_PATH_VARNAME = 'profile-pipe-path'
PROFILE_PIPE_LOCK_VARNAME = 'profile-pipe-lock'
POSTING_FILE_PATH_VARNAME = 'posting-file-path'
PROFILE_FILE_PATH_VARNAME = 'profile-file-path'

LOG_FILE_PATH_VARNAME = 'log-file-path'

PROFILE_CRAWLER_VARNAME = 'profile-crawler'

class Config:
    """Base class for crawler configurations.

    Parses a ``variable = value`` style config file into ``self.data`` and
    validates it against ``self.schema``.  ``self.schema`` maps a variable
    name to a list ``[REQUIRED|OPTIONAL, default, valid_values]`` where the
    last two entries are optional.  Subclasses populate ``self.schema``
    before loading and override ``createCrawler()``.
    """

    def __init__(self, configPath):
        self.schema = {}
        self.data = {}
        self.load(configPath)
        self.commonGenerate()

    def load(self, configPath):
        """Read configPath and fill self.data with variable/value pairs.

        Each non-blank, non-'#' line must look like ``name = value``.
        Exits the process on a missing file or a malformed line.
        """
        try:
            config_file = open(configPath)
        except IOError:
            # open() raises rather than returning a falsy value, so the
            # old 'if not file' check could never fire
            print('no config file ' + configPath + ' found!')
            sys.exit(-1)
        try:
            lines = config_file.readlines()
        finally:
            config_file.close()  # do not leak the file handle
        for linecnt, line in enumerate(lines, 1):
            line = line.strip()
            if not line or line.startswith('#'):
                continue  # tolerate blank lines and comments
            # split only on the first '=' so values may themselves contain '='
            tokens = line.split('=', 1)
            if len(tokens) != 2:
                print('config file format error at line ' + str(linecnt) + ': ' + line)
                sys.exit(-1)
            variable = tokens[0].strip()
            value = tokens[1].strip()
            self.data[variable] = value

    def commonGenerate(self):
        """Validate self.data against self.schema.

        1. aborts (via fatal_error) if a REQUIRED variable is not set
        2. fills in the default value for an unset OPTIONAL variable
        3. aborts if a set value is outside the schema's valid list
        """
        for key in self.schema.keys():
            if key not in self.data:
                if self.schema[key][0] == REQUIRED:
                    blogcrawlerUtil.fatal_error('You should specify \'' + key + '\' in config file!')
                elif self.schema[key][0] == OPTIONAL:
                    self.data[key] = self.schema[key][1]
            else:
                value = self.data[key]
                if len(self.schema[key]) >= 3:
                    if self.schema[key][2].count(value) == 0:
                        # was misspelled 'blogcrawlUtil' and passed two
                        # positional args unlike the call above
                        blogcrawlerUtil.fatal_error('The value for \'' + key + '\' must in ' + str(self.schema[key][2]))

    def createCrawler(self):
        """Create the crawler for this config; overridden by subclasses."""
        pass

class BlogGraphCrawlerConfig(Config):
    """Configuration for the blog-graph crawler.

    Every output file path (names, users, communities and the two
    relationship files) must be present in the config file.
    """

    def __init__(self, configPath):
        self.NAME_FILE_PATH_VARNAME = 'name-file'
        self.USER_FILE_PATH_VARNAME = 'user-file'
        self.COMMUNITY_FILE_PATH_VARNAME = 'community-file'
        self.FRIENDSHIP_FILE_PATH_VARNAME = 'friendship-file'
        self.COMMUNITYSHIP_FILE_PATH_VARNAME = 'communityship-file'

        # every variable is mandatory and has no usable default
        self.schema = {}
        for varname in (self.NAME_FILE_PATH_VARNAME,
                        self.USER_FILE_PATH_VARNAME,
                        self.COMMUNITY_FILE_PATH_VARNAME,
                        self.FRIENDSHIP_FILE_PATH_VARNAME,
                        self.COMMUNITYSHIP_FILE_PATH_VARNAME):
            self.schema[varname] = [REQUIRED, '']
        Config.__init__(self, configPath)

class SeedBasedCrawlerConfig(Config):
    """Configuration for the seed-based crawler.

    Reads the blog site, the seeds file and the posting output files from
    the config file and instantiates the matching seed parser.
    """

    def __init__(self, configPath):
        self.data = {}
        self.BLOG_SITE_VARNAME = 'blog-site'
        self.SEEDS_FILE_PATH_VARNAME = 'seeds-file'
        self.POSTING_FILE_PATH_VARNAME = 'posting-file'
        self.POSTING_URL_FILE_PATH_VARNAME = 'posting-url-file'

        # 'blog-site' was read in generate() but never declared in the
        # schema, so a missing value crashed with a KeyError instead of a
        # clean error message
        self.schema = {self.BLOG_SITE_VARNAME: [REQUIRED, ''],
                       self.POSTING_FILE_PATH_VARNAME: [REQUIRED, ''],
                       self.POSTING_URL_FILE_PATH_VARNAME: [REQUIRED, ''],
                       self.SEEDS_FILE_PATH_VARNAME: [REQUIRED, '']}
        self.load(configPath)
        self.generate()

    def generate(self):
        """Validate the config and cache its values as attributes."""
        # the schema was previously declared but never checked; validate
        # before the direct dictionary lookups below (was also preceded by
        # a leftover debug print of self.data)
        self.commonGenerate()
        self.blogSite = self.data[self.BLOG_SITE_VARNAME]
        self.seedsFilePath = self.data[self.SEEDS_FILE_PATH_VARNAME]
        self.postingFilePath = self.data[self.POSTING_FILE_PATH_VARNAME]
        self.postingUrlFilePath = self.data[self.POSTING_URL_FILE_PATH_VARNAME]
        if self.blogSite == SINA_BLOG:
            import SinaParser
            self.seedParser = SinaParser.SinaSeedParser()

    def createCrawler(self):
        """Build a SeedBasedCrawler driven by this configuration."""
        from SeedBasedCrawler import SeedBasedCrawler
        crawler = SeedBasedCrawler(self)
        return crawler

class UpdatePageCrawlerConfig(Config):
    """Configuration for the update-page blog crawler.

    Wires up the site-specific feed/comment/profile parsers for either
    Live Spaces or Blogspot and exposes them through ``self.data``.
    """

    def __init__(self, configPath):
        self.data = {}
        self.BLOG_SITE_VARNAME = 'blog-site'

        self.UPDATE_SLEEP_TIME_VARNAME = 'update-sleep-time'
        self.CONTENT_SLEEP_TIME_VARNAME = 'content-sleep-time'

        self.POSTING_FILE_PATH_VARNAME = 'posting-file-path'
        self.PROFILE_FILE_PATH_VARNAME = 'profile-file-path'
        self.LOG_FILE_PATH_VARNAME = 'log-file-path'
        self.PIPE_FILE_PATH_VARNAME = 'pipe-file-path'
        self.THREAD_NUM_VARNAME = 'thread-num'

        self.FEED_PARSER_VARNAME = 'feed-parser'
        self.PROFILE_CRAWLER_VARNAME = 'profile-crawler'
        self.COMMENT_PARSER_VARNAME = 'comment-parser'
        self.URL_UPDATOR_VARNAME = 'url-updator'
        self.FEED_EXTRACTOR_VARNAME = 'feed-extractor'
        self.PIPE_LOCK_VARNAME = 'pipe-lock'
        self.HOME_URL_UPDATOR_VARNAME = 'home-url-updator'

        # valid blog sites are exactly the two handled in generate(); the
        # list previously contained BLOG_SITE_VARNAME ('blog-site') by
        # mistake instead of BLOGSPOT_BLOG, so 'blogspot' was rejected
        self.schema = {self.BLOG_SITE_VARNAME: [REQUIRED, '', [LIVESPACE_BLOG, BLOGSPOT_BLOG]],
                       self.UPDATE_SLEEP_TIME_VARNAME: [OPTIONAL, 10],
                       self.CONTENT_SLEEP_TIME_VARNAME: [OPTIONAL, 10],
                       self.POSTING_FILE_PATH_VARNAME: [REQUIRED],
                       self.PROFILE_FILE_PATH_VARNAME: [REQUIRED],
                       self.LOG_FILE_PATH_VARNAME: [REQUIRED],
                       self.PIPE_FILE_PATH_VARNAME: [REQUIRED],
                       self.THREAD_NUM_VARNAME: [OPTIONAL, 1],
                       CRAWL_MODE_VARNAME: [OPTIONAL, CONTINUE_MODE, [CONTINUE_MODE, RESTART_MODE]],
                       PROFILE_PIPE_PATH_VARNAME: [REQUIRED]}
        self.load(configPath)
        self.generate()

    def generate(self):
        """Validate the config and build the site-specific parser objects."""
        self.commonGenerate()
        blogSite = self.data[self.BLOG_SITE_VARNAME]
        if blogSite == LIVESPACE_BLOG:
            import LiveSpaceParser
            self.data[self.FEED_PARSER_VARNAME] = LiveSpaceParser.LiveSpaceFeedPageParser()
            self.data[self.COMMENT_PARSER_VARNAME] = LiveSpaceParser.LiveSpaceCommentPageParser()
            self.data[self.FEED_EXTRACTOR_VARNAME] = LiveSpaceParser.LiveSpaceFeedExtractor()
            profileUrlExtractor = LiveSpaceParser.LiveSpaceProfileUrlExtractor()
            # was passing the class object itself; instantiate it to match
            # the blogspot branch -- TODO confirm ProfileCrawler expects an
            # instance here
            profileParser = LiveSpaceParser.LiveSpaceProfileParser()
            self.data[self.PROFILE_CRAWLER_VARNAME] = ProfileCrawler.ProfileCrawler(profileUrlExtractor, profileParser)
            self.data[self.HOME_URL_UPDATOR_VARNAME] = HomeUrlUpdator.CrawlUpdator(LiveSpaceParser.updateUrl, LiveSpaceParser.LiveSpaceUpdateParser())
        elif blogSite == BLOGSPOT_BLOG:
            # was 'import BloggerPageParser' while every reference below
            # says BloggerParser, which raised a NameError at runtime --
            # TODO confirm the real module name
            import BloggerParser
            self.data[self.FEED_PARSER_VARNAME] = BloggerParser.BloggerFeedPageParser()
            self.data[self.COMMENT_PARSER_VARNAME] = BloggerParser.BloggerCommentPageParser()
            self.data[self.FEED_EXTRACTOR_VARNAME] = BloggerParser.BloggerFeedExtractor()
            profileUrlExtractor = BloggerParser.BloggerProfileUrlExtractor()
            profileParser = BloggerParser.BloggerProfileParser()
            self.data[self.PROFILE_CRAWLER_VARNAME] = ProfileCrawler.ProfileCrawler(profileUrlExtractor, profileParser)
            # 'BlggerUpdateParser' looked like a typo -- TODO confirm
            self.data[self.HOME_URL_UPDATOR_VARNAME] = HomeUrlUpdator.CrawlUpdator(BloggerParser.updateUrl, BloggerParser.BloggerUpdateParser())

        self.data[self.PIPE_LOCK_VARNAME] = threading.Lock()

    def createCrawler(self):
        """Build an UpdatePageBlogCrawler driven by this configuration."""
        from BlogCrawler import UpdatePageBlogCrawler
        crawler = UpdatePageBlogCrawler(self)
        return crawler

class AllInStreamBlogCrawlerConfig(Config):
    """Configuration for the all-in-one stream blog crawler (LiveJournal)."""

    def __init__(self, configPath):
        self.data = {}
        self.CRAWL_MODE_VARNAME = 'crawl-mode'
        self.TASK_ID_VARNAME = 'task-id'
        self.STREAM_PARSER_THREAD_NUM = 'stream-parser-thread-num'
        self.PROFILE_CRAWLER_THREAD_NUM = 'profile-crawler-thread-num'

        self.schema = {
            BLOG_SITE_VARNAME: [REQUIRED, '', [LIVEJOURNAL_BLOG]],
            self.TASK_ID_VARNAME: [REQUIRED],
            self.STREAM_PARSER_THREAD_NUM: [REQUIRED],
            self.PROFILE_CRAWLER_THREAD_NUM: [REQUIRED],
            self.CRAWL_MODE_VARNAME: [OPTIONAL, CONTINUE_MODE, [CONTINUE_MODE, RESTART_MODE]],
        }

        self.load(configPath)
        self.generate()

    def generate(self):
        """Derive paths, thread counts and the crawler helper objects."""
        self.commonGenerate()
        self.taskId = self.data[self.TASK_ID_VARNAME]
        self.crawlMode = self.data[self.CRAWL_MODE_VARNAME]

        # intermediate files live in a directory named after the task id
        self.streamFilePath = os.path.join(self.taskId, "stream.dat")
        self.toCrawlHomeUrlFilePath = os.path.join(self.taskId, 'tocrawl.home.url.dat')
        self.toCrawlHomeUrlFileLock = threading.Lock()

        self.streamParserThreadNum = int(self.data[self.STREAM_PARSER_THREAD_NUM])
        self.profileCrawlerThreadNum = int(self.data[self.PROFILE_CRAWLER_THREAD_NUM])

        if self.data[BLOG_SITE_VARNAME] != LIVEJOURNAL_BLOG:
            return

        import LiveJournalParser
        journalStream = LiveJournalParser.LiveJournalStream()
        self.streamCrawler = StreamCrawler.StreamCrawler(journalStream, self)

        converter = LiveJournalParser.LiveJournalUrlConverter()
        journalFile = LiveJournalParser.LiveJournalStreamFile()
        updateParser = LiveJournalParser.LiveJournalUpdateParser()
        self.streamParser = StreamParser.StreamParser(journalFile, updateParser, converter, self)

        self.profileCrawler = ProfileCrawler.ProfileCrawler(
            converter,
            LiveJournalParser.LiveJournalProfileParser(),
            LiveJournalParser.LiveJournalUrlFilter(),
            self)

    def createCrawler(self):
        """Build an AllinOneStreamBlogCrawler driven by this configuration."""
        from BlogCrawler import AllinOneStreamBlogCrawler
        crawler = AllinOneStreamBlogCrawler(self)
        return crawler
    
        
