__author__ = 'jiangchanghao'
# -*- coding: utf-8 -*-

from FavProcess import FavProcess

class CrawlTopic(FavProcess):
    """Scan favorite/bookmark records and route each matching entry into a
    per-topic output file.

    Configuration is read from ``crawl_topic_list.txt`` (INI format):
      * ``[label]`` section: keyword -> output-file suffix, matched against
        an entry's tag/title;
      * ``[url]`` section: substring -> output-file suffix, matched against
        an entry's url;
      * ``[file]`` section: ``prefix`` option, prefix of every output file.
    """

    def __init__(self, filename=''):
        FavProcess.__init__(self, filename)
        self.label_list = []     # [(keyword, stream_suffix), ...]
        self.url_list = []       # [(url_substring, stream_suffix), ...]
        self.print_streams = {}  # stream_suffix -> open output file object
        self.init_lists()

    def init_lists(self):
        """Load the topic config and open one output stream per suffix."""
        # Py2/Py3 compatible import (the rest of the file targets Python 2,
        # where the module is named ``ConfigParser``).
        try:
            import configparser
        except ImportError:
            import ConfigParser as configparser
        cf = configparser.ConfigParser()
        cf.read('crawl_topic_list.txt')
        self.label_list = cf.items('label')
        self.url_list = cf.items('url')
        prefix = cf.get('file', 'prefix')
        # ``dict.has_key`` is deprecated (removed in Py3); use ``in``.
        for _keyword, suffix in self.label_list:
            if suffix not in self.print_streams:
                self.print_streams[suffix] = open(
                    'res/' + prefix + '_label_' + suffix, 'w')
        for _keyword, suffix in self.url_list:
            if suffix not in self.print_streams:
                self.print_streams[suffix] = open(
                    'res/' + prefix + '_url_' + suffix, 'w')

    def finalize(self):
        """Close every output stream opened by init_lists()."""
        # The original shadowed the builtin ``str`` as the loop variable;
        # renamed to avoid confusion.
        for stream in self.print_streams.values():
            stream.close()

    def process_line(self, line):
        """Parse one tab-separated input line and dispatch each record.

        Expected line layout::

            user_id \\t (url \\t title \\t tag)* \\t time_mod
        """
        ss = line.split('\t')
        num = len(ss)
        # ``assert`` is stripped under ``python -O``; raise explicitly so
        # malformed input is always rejected.
        if not (num > 0 and (num - 2) % 3 == 0):
            raise ValueError("error for taking line syntax")
        user_id = ss[0]
        # ss[-1] is the modification time; currently unused.
        idx = 1
        while idx < num - 1:
            url, title, tag = ss[idx:idx + 3]
            url = self.url_short(url)  # refactor the url first
            idx += 3
            # 1. filter on tag (there can also be a filter or pre-process
            #    on the url itself).
            if tag == '-':
                # When the user put no tag on this url, fall back to the
                # title — unless the title is too long to be useful as a
                # tag (30 chinese words, per the original author's note).
                if len(title) < 30:
                    tag = title
                else:
                    continue
            # 2. record the entry under both matchers
            self.process_info(user_id, url, title, tag)

    def process_info(self, user_id, url, title, tag):
        """Record one entry via both the keyword and the url matcher."""
        self.matchTag(user_id, url, title, tag)
        self.matchUrl(user_id, url, title, tag)

    def matchTag(self, user_id, url, title, tag):
        """Write the entry to the first label stream whose keyword occurs
        in the title (score +1) and/or in the tag (score +10).

        At most one label stream receives the entry (first match wins).
        """
        for key, suffix in self.label_list:
            score = 0
            if key in title:
                score += 1
            if key in tag:
                score += 10
            if score > 0:
                out = self.print_streams[suffix]
                out.write(str(score) + '\t' + key + '\t' + user_id + '\t'
                          + tag + '\t' + title + '\t' + url + '\n')
                return

    def matchUrl(self, user_id, url, title, tag):
        """Write the entry to the first url stream whose key is a substring
        of the url; the score column is fixed at -1 for url matches."""
        for key, suffix in self.url_list:
            if key in url:
                out = self.print_streams[suffix]
                out.write('-1' + '\t' + key + '\t' + user_id + '\t'
                          + tag + '\t' + title + '\t' + url + '\n')
                return


if __name__ == '__main__':
    # Drive a full crawl pass: construct, run, and close output streams.
    crawler = CrawlTopic()
    crawler.init()
    crawler.finalize()