#!/usr/bin/env python

######################################################################
# @author      : alpha (alpha@mascot)
# @created     : Sunday Dec 26, 2021 17:24:25 CST
#
# @description : classify series
######################################################################

import re
import sys
import ast
import json
import logging
import argparse
import traceback
import pandas
from styleframe import StyleFrame, Styler

def parse_args():
    """Parse command-line arguments.

    When -o/--output or -l/--log is omitted (left at the empty-string
    default), derive a default file name from the input excel file name
    ("<excel-basename>-category.xlsx" / ".log").  Passing a bare -o or -l
    sets the value to None, which disables that default derivation.
    """
    comment = "Classify series according category regular expression."
    parser = argparse.ArgumentParser(description = comment,
                                     formatter_class =
                                     argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-l', '--log', dest = 'log', action = 'store',
                        nargs = '?', default = '',
                        help = "specify the log file")
    parser.add_argument('-f', '--excel-file', dest = 'excel_file',
                        action = 'store', required = True,
                        help = "the excel file which contains series data")
    parser.add_argument('-i', '--sheet-index', dest = 'sheet_index',
                        type = int, action = 'store', default = 0,
                        help = "(default) sheet index of the series data")
    parser.add_argument('-n', '--sheet-name', dest = 'sheet_name',
                        type = str, action = 'store', default = None,
                        help = "(priority) sheet name of the series data")
    parser.add_argument('-c', '--category', dest = 'category',
                        action = 'store', required = True,
                        help = "specify the category JSON file")
    parser.add_argument('-o', '--output', dest = 'output',
                        action = 'store', nargs = '?', default = '',
                        help = "save series category to which excel file")
    args = parser.parse_args()
    # Build a "<excel-basename>-" prefix for the derived file names.
    prefix = ''
    if args.excel_file:
        prefix = args.excel_file.rsplit('.', 1)[0]
        if prefix:
            prefix += '-'
    # Only the empty-string default means "not specified"; None (a bare
    # -o/-l with no value) deliberately disables the file.
    if args.output == '':
        args.output = prefix + 'category.xlsx'
    if args.log == '':
        args.log = prefix + 'category.log'
    return args

def init_logger(log = None, encoding = 'utf-8', mode = 'w'):
    """Create the module logger: INFO+ to the console, DEBUG+ to a file.

    log      -- optional log file path; falsy (None or '') disables file
                logging
    encoding -- text encoding for the log file
    mode     -- open mode for the log file ('w' truncates on every run)

    Returns the configured logger.  NOTE(review): calling this more than
    once adds duplicate handlers to the same named logger.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')
    # Console handler is capped at INFO to keep terminal output terse.
    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    sh.setLevel(logging.INFO)
    logger.addHandler(sh)
    if log:
        # The file handler sets no level of its own, so it inherits the
        # logger's DEBUG level and records the full diagnostic trace.
        fh = logging.FileHandler(log, encoding = encoding, mode = mode)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    return logger

def load_json(filename):
    """Load a JSON file, ignoring comment lines that begin with '#'.

    Returns the parsed object, or None when *filename* is empty/None.
    """
    if not filename:
        return None
    with open(filename, 'r', encoding = 'utf-8') as f:
        raw = f.read()
    # Strip whole lines whose first non-whitespace character is '#'
    # (strict JSON has no comment syntax of its own).
    stripped = re.sub(r'^[\s]*#.*[\n]', '', raw, 0, re.M)
    return json.loads(stripped)

def format_exception():
    """Return the currently handled exception as a traceback string.

    Must be called from inside an ``except`` block; otherwise
    sys.exc_info() yields (None, None, None) and the result is empty.
    """
    # traceback.format_exception returns a list of text lines;
    # ''.join avoids the quadratic loop of repeated string concatenation.
    exc_type, exc_value, exc_traceback = sys.exc_info()
    return ''.join(traceback.format_exception(exc_type, exc_value,
                                              exc_traceback))

class LinuxPatchClassifier:
    """Classify patch series by matching category regexes against each
    series subject and its thread texts.

    Workflow: read_excel() -> classify_series() -> to_excel().
    State: series_dataframe (input rows), category_pattern (compiled
    regexes), series_category (category name -> list of matching rows).
    """

    def read_excel(self, excel_file, sheet_index, sheet_name = None):
        """Read the series sheet into self.series_dataframe.

        sheet_name, when given and non-empty, takes priority over
        sheet_index.  On any failure series_dataframe stays None so the
        later stages become no-ops.
        """
        self.series_dataframe = None
        logger.info('Reading series from excel file "{}", '
                    'sheet(index) {}, sheet(name) {}'
                    .format(excel_file, sheet_index, sheet_name))
        if not excel_file:
            logger.error('Unknown excel file')
            return
        sheet = sheet_name if sheet_name else sheet_index
        try:
            self.series_dataframe = pandas.read_excel(excel_file,
                                                      sheet_name = sheet)
            logger.info('Read {} series from excel file "{}" sheet {} success'
                        .format(self.series_dataframe.shape[0],
                                excel_file, sheet))
        except Exception:
            # Best effort: log the failure instead of crashing the run.
            logger.error('Read series from excel file "{}" '
                         'sheet {} failed:\n{}'
                         .format(excel_file, sheet, format_exception()))

    def __compile_pattern(self, patterns, spec):
        """Compile the 'exclude'/'include' regexes of *spec* (a dict or
        None) into *patterns*, case-insensitively.

        Returns True when at least one pattern was compiled.
        """
        compiled = False
        exc = spec.get('exclude', '') if spec is not None else ''
        inc = spec.get('include', '') if spec is not None else ''
        if exc:
            patterns['exclude'] = re.compile(exc, re.I)
            compiled = True
        if inc:
            patterns['include'] = re.compile(inc, re.I)
            compiled = True
        return compiled

    def __init_category_pattern(self, filename):
        """Load the category JSON file and compile all of its patterns.

        Expected JSON shape: {"common": {...}, "category": {name: {...}}}
        where each {...} may hold 'exclude'/'include' regex strings.
        Returns True when at least one per-category pattern exists.
        """
        has_category = False
        self.category_pattern = {'common': {}, 'category': {}}
        try:
            jd = load_json(filename)
            logger.debug("Category JSON: {}".format(jd))
            if jd is None:
                return False
            self.__compile_pattern(self.category_pattern['common'],
                                   jd.get('common'))
            cg = jd.get('category')
            if cg is None:
                return False
            for name, spec in cg.items():
                self.category_pattern['category'][name] = {}
                res = self.__compile_pattern(
                    self.category_pattern['category'][name], spec)
                has_category = has_category or res
        except Exception:
            logger.error("Initialize category pattern failed:\n{}"
                         .format(format_exception()))
        finally:
            logger.debug("Category pattern: {}, has category {}"
                         .format(list(self.category_pattern.items()),
                                 has_category))
        return has_category

    def __classify_under_common(self, text):
        """Apply the common filter to *text*.

        Rules: a series must not match the common 'exclude' pattern and
        must match the common 'include' pattern (when either is set).
        Returns an empty set when *text* passes, None when filtered out.
        """
        exclude = self.category_pattern['common'].get('exclude')
        if exclude is not None:
            m = exclude.search(text)
            logger.debug('Common exclude: re.search({}, "{}") match {}'
                         .format(exclude, text, m))
            if m is not None:
                return None
        include = self.category_pattern['common'].get('include')
        if include is not None:
            m = include.search(text)
            logger.debug('Common include: re.search({}, "{}") match {}'
                         .format(include, text, m))
            if m is None:
                return None
        return set()

    def __classify_under_category(self, text):
        """Return the set of category names whose patterns accept *text*.

        Rules: per category, *text* must not match that category's
        'exclude' pattern and must match its 'include' pattern.
        """
        matched = set()
        for name, patterns in self.category_pattern['category'].items():
            exclude = patterns.get('exclude')
            if exclude is not None:
                m = exclude.search(text)
                logger.debug('Category "{}" exclude: re.search({}, "{}") match {}'
                             .format(name, exclude, text, m))
                if m is not None:
                    continue
            include = patterns.get('include')
            if include is not None:
                m = include.search(text)
                logger.debug('Category "{}" include: re.search({}, "{}") match {}'
                             .format(name, include, text, m))
                if m is not None:
                    matched.add(name)
        return matched

    def __do_classify(self, text):
        """Classify one text: None when the common filter rejects it,
        otherwise the (possibly empty) set of matching categories."""
        if self.__classify_under_common(text) is None:
            return None
        return self.__classify_under_category(text)

    def classify_series(self, filename):
        """Classify every series row against the category patterns.

        filename -- the category JSON file (see __init_category_pattern)
        Fills self.series_category: category name -> list of row tuples.
        A series belongs to a category when its Subject or any of its
        thread texts matches that category.
        """
        self.series_category = {}
        if self.series_dataframe is None:
            return
        if not self.__init_category_pattern(filename):
            return
        n = 0
        total = self.series_dataframe.shape[0]
        # Log progress roughly every 10% of the rows, at least every row.
        step = max(1, round(total / 10))
        progress = range(1, total + 1, step)
        logger.info("Classify {} series ...".format(total))
        for row in self.series_dataframe.itertuples(index = False):
            try:
                n += 1
                if n in progress:
                    logger.info("Progress {}/{}".format(n, total))
                logger.debug('Classify series {}/{}: {}'
                             .format(n, total, row))
                c = set()
                logger.debug("Classify against series: {}"
                             .format(row.Subject))
                sc = self.__do_classify(row.Subject)
                logger.debug("Category: {}, series: {}"
                             .format(sc, row.Subject))
                if sc is not None:
                    c |= sc
                # The 'Threads' cell holds a Python-literal list of dicts;
                # literal_eval parses it without executing code.
                threads = ast.literal_eval(row.Threads)
                logger.debug('Classify {} threads of series {}/{} ...'
                             .format(len(threads), n, total))
                for t in threads:
                    logger.debug("Classify against thread: {}"
                                 .format(t['text']))
                    tc = self.__do_classify(t['text'])
                    logger.debug("Category: {}, thread: {}"
                                 .format(tc, t['text']))
                    if tc is not None:
                        c |= tc
                logger.debug('Series {}/{} category: {}'.format(n, total, c))
                for k in c:
                    self.series_category.setdefault(k, []).append(row)
            except Exception:
                # Keep going: one malformed row must not abort the run.
                logger.error('Classify series {}/{} failed:\n{}'
                             .format(n, total, format_exception()))
        logger.info("Classify {}/{} series finished, category: {}"
                    .format(n, total, list(self.series_category.keys())))

    def to_excel(self, filename):
        """Save all series plus one sheet per category to *filename*."""
        if not filename:
            return
        if self.series_dataframe is None:
            logger.info('No series category to save to excel file')
            return
        # Disable styleframe's automatic column-width scaling factors.
        StyleFrame.A_FACTOR = 1
        StyleFrame.P_FACTOR = 1
        try:
            excel_writer = StyleFrame.ExcelWriter(filename)
            header = Styler(horizontal_alignment = 'left', bold = True)
            styler = Styler(horizontal_alignment = 'left',
                            date_time_format = 'YYYY-MM-DD HH:MM:SS')
            # First sheet holds every series; then one sheet per category.
            sc = [('all-series', self.series_dataframe)]
            sc += self.series_category.items()
            sheets = []
            for name, rows in sc:
                df = pandas.DataFrame(rows)
                # Most recent activity first, then alphabetic by subject.
                df = df.sort_values(by = ['Last', 'Date', 'Subject'],
                                    ascending = [False, False, True])
                sf = StyleFrame(df, styler)
                sf.apply_headers_style(header)
                sf.set_column_width(['Subject'], 80)
                sf.set_column_width(['Link'], 60)
                sf.to_excel(excel_writer = excel_writer,
                            sheet_name = name,
                            columns_and_rows_to_freeze = 'B2',
                            best_fit = ['Date', 'Last', 'Threads'])
                sheets += [name]
            excel_writer.save()
        except Exception:
            logger.error("Save series category to excel file failed:\n{}"
                         .format(format_exception()))
            return
        logger.info('Save series category to excel file "{}", sheets {}'
                    .format(filename, sheets))

def main():
    """Script entry point: parse arguments, read the series excel file,
    classify the series, and save the categorized result."""
    # The classifier methods log through this module-level logger,
    # so it must be assigned globally before they run.
    global logger
    args = parse_args()
    logger = init_logger(args.log)
    logger.info("Arguments: {}".format(vars(args)))
    classifier = LinuxPatchClassifier()
    classifier.read_excel(args.excel_file, args.sheet_index, args.sheet_name)
    classifier.classify_series(args.category)
    classifier.to_excel(args.output)

if __name__ == '__main__':
    # Guard keeps the classification from running on mere import.
    main()
