#!/usr/bin/env python3
#
# Crawl linux patches from https://lore.kernel.org/
# 1. Pick a mailing-list inbox, e.g. linux-arm-kernel
# 2. Run a search with the given query, e.g. 'd:3.days.ago..'
# Based on the search results this script crawls the linux patch
# mail threads, finds each thread's root message, and outputs that
# root as the "series" grouping the mails together.
# Opening a series page then shows every mail of that series.
#

import re
import sys
import json
import logging
import argparse
import platform
import traceback
from datetime import datetime, timezone
from lxml import etree
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
import pandas
from styleframe import StyleFrame, Styler

def load_json(filename):
    """Load a JSON file, stripping '#' comment lines first.

    Lines whose first non-whitespace character is '#' are removed
    before parsing.  Returns None when filename is None or empty.
    """
    if not filename:
        return None
    with open(filename, 'r', encoding = 'utf-8') as fp:
        raw = fp.read()
    # Drop comment lines starting with '#' (any leading whitespace).
    cleaned = re.sub(r'^[\s]*#.*[\n]', '', raw, 0, re.M)
    return json.loads(cleaned)

def load_config(filename):
    """Load a config JSON file via load_json.

    Always returns a dict: an empty one when the file is missing,
    empty, or fails to parse, so callers can use dict lookups safely.
    """
    try:
        data = load_json(filename)
    except:
        print('load config file "{}" failed:\n{}'
              .format(filename, format_exception()))
        data = None
    return {} if data is None else data

def normalize_keys(df):
    """Return a copy of dict *df* with '-' in keys replaced by '_'.

    Returns None when *df* is None (e.g. no config file was loaded).
    """
    if df is None:
        return None
    return {key.replace('-', '_'): val for key, val in df.items()}

def init_parser(df):
    """Build the command-line parser, seeding defaults from *df*.

    df maps normalized option names to their default values (usually
    loaded from default.json); missing keys fall back to built-ins.
    """
    comment = ("Search and crawl linux patch series "
               "from https://lore.kernel.org/. "
               "The default config can be defined in default.json.")
    parser = argparse.ArgumentParser(
        description = comment,
        formatter_class = argparse.ArgumentDefaultsHelpFormatter)
    add = parser.add_argument
    add('-l', '--log', dest = 'log', action = 'store', nargs = '?',
        default = df.get('log', ''),
        help = "specify the log file")
    add('-i', '--inbox', dest = 'inbox', action = 'store',
        default = df.get('inbox', 'linux-arm-kernel'),
        help = "search linux patch threads from which inbox")
    add('-q', '--queries', dest = 'queries', action = 'store',
        default = df.get('queries', 'd:7.days.ago..'),
        help = "search linux patch threads by what queries")
    add('-d', '--from-days-ago', dest = 'from_days_ago', type = int,
        action = 'store', default = df.get('from_days_ago'),
        help = 'search linux patch threads by '
               'd:{}.days.ago.. and overwrite --queries')
    add('-m', '--max-threads', dest = 'max_threads', type = int,
        action = 'store', default = df.get('max_threads', 1000),
        help = "how many threads will be crawled at most")
    add('-g', '--ignore-reply', dest = 'ignore_reply',
        action = 'store_true', default = df.get('ignore_reply', False),
        help = 'ignore reply threads, which subject start with "Re:"')
    add('-c', '--category', dest = 'category', nargs = '?',
        action = 'store', default = df.get('category', ''),
        help = "specify the category JSON file")
    add('-o', '--output', dest = 'output', action = 'store', nargs = '?',
        default = df.get('output', ''),
        help = "save series to which excel file")
    add('-b', '--browser', dest = 'browser', action = 'store',
        choices = ['chrome', 'firefox'],
        default = df.get('browser', 'chrome'),
        help = "specify the browser")
    add('-a', '--attributes', dest = 'attributes', action = 'store',
        default = df.get('attributes', ''),
        help = "specify the attributes file")
    return parser

def default_args():
    """Parse command-line args with defaults taken from default.json,
    then overlay any values from the --attributes JSON file.
    """
    parser = init_parser(normalize_keys(load_config('default.json')))
    args = parser.parse_args()
    overrides = {}
    if args.attributes:
        overrides = normalize_keys(load_config(args.attributes))
    for key, val in overrides.items():
        setattr(args, key, val)
    return args

def parse_args():
    """Parse the final arguments, deriving timestamped output/log file
    names when they were requested without a value, and translating
    --from-days-ago into a 'd:N.days.ago..' query.
    """
    parser = init_parser(vars(default_args()))
    args = parser.parse_args()
    stamp = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')
    # Base the generated file names on the inbox when one is given.
    if args.inbox:
        base = args.inbox + '-' + stamp
    else:
        base = 'lore-kernel-org' + '-' + stamp
    # An empty string means the option was given without a value:
    # synthesize a file name (None means the option was suppressed).
    if args.output is not None and len(args.output) == 0:
        args.output = (base + ".xlsx")
    if args.log is not None and len(args.log) == 0:
        args.log = (base + ".log")
    if args.from_days_ago is not None and args.from_days_ago > 0:
        args.queries = 'd:{}.days.ago..'.format(args.from_days_ago)
    return args

def init_logger(log):
    """Create the application logger.

    Always attaches a console handler limited to INFO and above; when
    *log* names a file, also attaches a file handler (truncating the
    file) that receives everything the logger passes (DEBUG and up).
    """
    logger = logging.getLogger('lore.kernel.org')
    logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(fmt)
    logger.addHandler(console)
    if log:
        filehandler = logging.FileHandler(log, encoding = 'utf-8', mode = 'w')
        filehandler.setFormatter(fmt)
        logger.addHandler(filehandler)
    return logger

def format_exception():
    """Return the currently handled exception's traceback as one string."""
    exc_type, exc_value, exc_traceback = sys.exc_info()
    parts = traceback.format_exception(exc_type, exc_value, exc_traceback)
    return ''.join(parts)

def extract_thread_date(text):
    """Parse the author/date line of a thread entry into local time.

    Example input: '- by Conley Lee @ 2022-01-12  2:37 UTC [99%]'
    The timestamp between '@' and 'UTC' is interpreted as UTC and
    converted to a naive local datetime.  Returns None on failure.
    """
    try:
        # The segment between '@' and 'UTC' holds the date; the raw
        # 'UTC' suffix itself cannot be parsed by strptime directly.
        raw = re.split('@|UTC', text)[-2].strip()
        parsed = datetime.strptime(raw, '%Y-%m-%d %H:%M')
        # The page omits seconds; pad with the maximum value.
        parsed = parsed.replace(second = 59)
        # Re-interpret the naive timestamp as UTC ...
        parsed = datetime.combine(parsed.date(), parsed.time(), timezone.utc)
        # ... then shift it into local time.
        local = parsed.astimezone()
        # Excel cannot store tz-aware datetimes, so drop the tzinfo.
        local = local.replace(tzinfo = None)
        logger.debug('extract thread date "{}" from "{}" success'
                     .format(local, text))
        return local
    except:
        logger.error('extract thread date from "{}" failed:\n{}'
                     .format(text, format_exception()))
        return None

def extract_series_date(text):
    """Parse a message 'Date:' header line into naive local time.

    Example input:
        'Date: Wed, 12 Jan 2022 11:11:04 -0500 (EST)  [thread overview]'
    The weekday part may be absent, and the trailing timezone may be a
    name (with or without parentheses), a numeric offset, or missing
    entirely (in which case UTC is assumed).  Returns a naive local
    datetime, or None on failure.
    """
    try:
        # Drop the leading 'Date:' label.
        date = text.split(":", 1)[1].strip()
        # Drop the optional weekday before the first comma and the
        # trailing '[thread overview]' marker.
        idx = int(date.find(',') > 0) # tolerate a missing weekday part
        # BUGFIX: use a raw string for the pattern; '\,' in a plain
        # string is an invalid escape sequence (SyntaxWarning on
        # modern Python) even though the regex happened to work.
        date = re.split(r',|\[', date)[idx].strip()
        # Drop a trailing timezone name, if any (e.g. 'GMT', '(EST)').
        if not date[-1].isdigit():
            date = date.rsplit(' ', 1)[0].strip()
        if re.search('[+-]', date) is None:
            # No numeric UTC offset: parse naive and assume UTC.
            date = datetime.strptime(date, '%d %b %Y %H:%M:%S')
            date = datetime.combine(date.date(), date.time(), timezone.utc)
        else:
            date = datetime.strptime(date, '%d %b %Y %H:%M:%S %z')
        # Convert to local time.
        date = date.astimezone()
        # Excel cannot store tz-aware datetimes, so drop the tzinfo.
        date = date.replace(tzinfo = None)
        logger.debug('extract series date "{}" from "{}" success'
                     .format(date, text))
        return date
    except:
        logger.error('extract series date from "{}" failed:\n{}'
                     .format(text, format_exception()))
        return None

class LoreKernelOrgCrawler:
    """Crawl patch mail threads from https://lore.kernel.org/ using a
    selenium-driven browser, resolve each thread to its series root
    page, optionally classify the series with regex-based categories,
    and export everything to an excel workbook.
    """

    def __init__(self, browser):
        """Start the webdriver for *browser* ('chrome' or 'firefox')."""
        self.__init_driver(browser)

    def __init_driver(self, browser):
        # NOTE: the webdriver binary version must match the version of
        # the browser installed on the system; with a mismatched
        # version find_element does not return a web element object.
        if not browser is None and browser.lower() == 'firefox':
            self.__init_firefox()
        else:
            self.__init_chrome()

    def __init_chrome(self):
        if platform.system() == 'Linux':
            opt = webdriver.ChromeOptions()
            opt.headless = True
            self.driver = webdriver.Chrome(options = opt)
        else:
            # On Windows hiding the window triggers a
            # Content Security Policy error, so keep it visible.
            self.driver = webdriver.Chrome()

    def __init_firefox(self):
        self.driver = webdriver.Firefox()

    def __del__(self):
        # NOTE(review): close() only closes the current window; quit()
        # would also terminate the webdriver process — confirm this is
        # the intended cleanup.
        self.driver.close()

    def __search_threads(self, inbox, queries):
        """Open the *inbox* page and submit the search *queries*.

        Returns True when the search form was submitted successfully.
        """
        base_url = 'https://lore.kernel.org/'
        start_url = base_url + inbox
        logger.info('Searching threads: url: "{}", queries: "{}"'
                    .format(start_url, queries))
        try:
            self.driver.get(start_url)
            # queries input text element
            q = self.driver.find_element(By.XPATH,
                                         "/html/body/form/pre/input[1]")
            q.send_keys(queries)
            # search input submit element
            search = self.driver.find_element(By.XPATH,
                                              "/html/body/form/pre/input[2]")
            search.click()
        except:
            logger.error("Search threads failed:\n{}"
                         .format(format_exception()))
            return False
        logger.info("Search threads success")
        return True

    def __crawl_page(self, max_threads, ignore_reply):
        """Collect the threads listed on the current result page.

        max_threads caps how many more threads may be collected (0
        means the cap is already reached; negative means unlimited);
        when ignore_reply is true, threads whose subject starts with
        "Re:" are skipped.  Returns (collected, ignored) counts.
        """
        if max_threads == 0:
            logger.info('Max number of threads reached')
            return 0, 0
        logger.info('Crawling page: {}'.format(self.driver.current_url))
        count = 0
        ignore = 0
        try:
            # A page that lists threads has 3 <pre> tags; an empty
            # result page has only 1.
            pre = self.driver.find_elements(By.XPATH, "/html/body/pre")
            if len(pre) < 3:
                logger.warning("No thread in current page")
                return 0, 0
            # Every <a> tag under the first <pre> is a thread of the
            # current page; the text node right after each thread
            # holds the author and date.
            idx = 0
            html = etree.HTML(self.driver.page_source)
            texts = html.xpath('/html/body/pre[1]/text()') # author/date parts
            pre = self.driver.find_element(By.XPATH, "/html/body/pre[1]")
            for a in pre.find_elements(By.TAG_NAME, 'a'):
                idx += 1 # skip the first text node (the index number)
                text = texts[idx].strip()
                date = extract_thread_date(text)
                if date is None: continue
                t = {'text': a.text,
                     'href': a.get_attribute("href"),
                     'date': date}
                if ignore_reply and a.text.startswith('Re:'):
                    ignore += 1
                    logger.debug('Ignore reply thread: {}'.format(t))
                    continue
                self.threads.append(t)
                count += 1
                max_threads -= 1
                if max_threads == 0:
                    logger.info('Max number of threads reached')
                    break
        except:
            logger.error("Crawled current page failed:\n{}"
                         .format(format_exception()))
        return count, ignore

    def crawl_threads(self, inbox, queries, max_threads = -1, ignore_reply = False):
        """Search *inbox* with *queries* and crawl the result pages.

        Collects at most max_threads threads (negative = unlimited)
        into self.threads, following the "next (older)" links until
        the cap or the last page is reached.
        """
        self.start_time = datetime.today()
        self.threads = []
        # Run the search and land on the first page of results.
        if not self.__search_threads(inbox, queries): return
        logger.info("Crawlling threads, at most {} ..."
                    .format('unlimited' if max_threads < 0 else max_threads))
        try:
            # Crawl the first page of threads.
            page = 1
            ignore = 0
            count, ig = self.__crawl_page(max_threads, ignore_reply)
            if count == 0: return
            max_threads -= count
            ignore += ig
            while max_threads != 0:
                # Open and crawl the next page of threads.
                n = self.driver.find_element(By.LINK_TEXT, "next (older)")
                self.driver.get(n.get_attribute("href")) # open the next page
                page += 1
                count, ig = self.__crawl_page(max_threads, ignore_reply)
                if count == 0: return
                max_threads -= count
                ignore += ig
        except NoSuchElementException:
            # No "next (older)" link: the last page has been crawled.
            logger.info("Crawled all pages")
        finally:
            if ignore > 0:
                logger.info("Crawled {} pages, total {} threads, "
                            "ignore {} reply threads"
                            .format(page, len(self.threads), ignore))
            else:
                logger.info("Crawled {} pages, total {} threads"
                            .format(page, len(self.threads)))

    def __compile_pattern(self, pd, cd):
        """Compile the optional 'exclude'/'include' regexes from dict
        *cd* into *pd* (case-insensitive).  Returns True when at least
        one pattern was compiled.
        """
        res = False
        exc = ''
        inc = ''
        if not cd is None:
            exc = cd.get('exclude', '')
            inc = cd.get('include', '')
        if len(exc) > 0:
            pd['exclude'] = re.compile(exc, re.I)
            res = True
        if len(inc) > 0:
            pd['include'] = re.compile(inc, re.I)
            res = True
        return res

    def __init_category_pattern(self, filename):
        """Load the category JSON file *filename* and compile its
        patterns into self.category_pattern.  Returns True when at
        least one named category has a usable pattern.
        """
        has_category = False
        self.category_pattern = {'common': {}, 'category': {}}
        try:
            jd = load_json(filename)
            logger.debug("Category JSON: {}".format(jd))
            if jd is None: return False
            self.__compile_pattern(self.category_pattern['common'],
                                   jd.get('common'))
            cg = jd.get('category')
            if cg is None: return False
            for k, v in cg.items():
                self.category_pattern['category'][k] = {}
                res = self.__compile_pattern(self.category_pattern['category'][k], v)
                has_category = has_category or res
        except:
            logger.error("Initialize category pattern failed:\n{}"
                         .format(format_exception()))
        finally:
            logger.debug("Cagetory pattern: {}, has category {}"
                         .format(list(self.category_pattern.items()),
                                 has_category))
        return has_category

    def __classify_under_common(self, text):
        # Classification rules:
        # no category may match the common 'exclude' pattern,
        # and every category must match the common 'include' pattern.
        p = self.category_pattern['common'].get('exclude')
        if not p is None:
            m = p.search(text)
            logger.debug('Common exclude: re.search({}, "{}") match {}'
                         .format(p, text, m))
            if not m is None:
                return None
        p = self.category_pattern['common'].get('include')
        if not p is None:
            m = p.search(text)
            logger.debug('Common include: re.search({}, "{}") match {}'
                         .format(p, text, m))
            if m is None:
                return None
        return set()

    def __classify_under_category(self, text):
        # Classification rules:
        # a category must not match its own 'exclude' pattern,
        # and must match its own 'include' pattern.
        c = set()
        for k, v in self.category_pattern['category'].items():
            p = v.get('exclude')
            if not p is None:
                m = p.search(text)
                logger.debug('Category "{}" exclude: re.search({}, "{}") match {}'
                             .format(k, p, text, m))
                if not m is None:
                    continue
            p = v.get('include')
            if not p is None:
                m = p.search(text)
                logger.debug('Category "{}" include: re.search({}, "{}") match {}'
                             .format(k, p, text, m))
                if not m is None:
                    c.add(k)
        return c

    def __do_classify(self, text):
        """Classify *text*: None when the common rules reject it,
        otherwise the set of matching category names."""
        if self.__classify_under_common(text) is None: return None
        return self.__classify_under_category(text)

    def __classify_series(self, t, s):
        """Return the union of categories matched by series *s* and
        thread *t* (either may be None)."""
        c = set()
        if not s is None:
            sc = self.__classify_against_series(s)
            if not sc is None:
                c |= sc
        if not t is None:
            tc = self.__classify_against_thread(t)
            if not tc is None:
                c |= tc
        return c

    def __classify_against_series(self, s):
        """Classify series *s* by its Subject, caching by its Link."""
        c = set()
        if s['Link'] in self.category_records:
            c = self.category_records.get(s['Link'])
            logger.debug('Found category {} against series {}'.format(c, s))
            return c
        c = self.__do_classify(s['Subject'])
        self.category_records[s['Link']] = c
        return c

    def __classify_against_thread(self, t):
        """Classify thread *t* by its subject text, caching by href."""
        c = set()
        if t['href'] in self.category_records:
            c = self.category_records.get(t['href'])
            logger.debug('Found category {} against thread {}'.format(c, t))
            return c
        c = self.__do_classify(t['text'])
        self.category_records[t['href']] = c
        return c

    def __find_other_thread(self, html):
        """On a 'not found' page, look for the link to the same thread
        in another inbox.  Returns {'text', 'href'} or None."""
        try:
            text = html.xpath('/html/body/pre[1]/text()[1]')[0]
            logger.debug('Find thread in other inbox: {}'.format(text))
            if not text is None and 'found in' in text:
                e = self.driver.find_elements(By.XPATH,
                                              "/html/body/pre[1]/a")[0]
                t = {'text': e.text, 'href': e.get_attribute('href')}
                logger.debug('Found thread in other inbox: {}'.format(t))
                return t
            logger.debug('Not found thread in other inbox')
            return None
        except:
            logger.debug('Find thread in other inbox failed:\n{}'
                         .format(format_exception()))
            return None

    def __read_other_inbox(self, html):
        """Follow the cross-inbox link, if any, and parse the series
        found there.  Returns the series dict or None."""
        logger.debug('Reading series in other inbox ...')
        t = self.__find_other_thread(html)
        if t is None: return None
        if t['href'] == self.driver.current_url:
            # The link points back at this very page: avoid recursing.
            logger.warning('Dead loop series found: {}'.format(t['href']))
            return None
        if not (s := self.__parse_thread(t)) is None:
            logger.debug('Read series in other inbox success')
            return s
        logger.debug('Read series in other inbox failed')
        return None

    def __read_series_info(self):
        """Read Subject/Link/Date of the series from the current page.

        Returns {'Subject', 'Link', 'Date'} or None when the page does
        not look like a series page (falling back to another inbox if
        a cross-inbox link is present).
        """
        logger.debug("Reading series info from: {}".format(self.driver.current_url))
        try:
            # Read the Date header.
            html = etree.HTML(self.driver.page_source)
            text = html.xpath('/html/body/pre[1]/text()[2]')[0].strip()
            if not text.startswith('Date:'):
                logger.debug('Not found series date')
                if not (s := self.__read_other_inbox(html)) is None:
                    return s
                return None
            date = extract_series_date(text)
            if date is None: return None
            # Read the Subject.
            s = self.driver.find_element(By.XPATH, "/html/body/pre[1]/a[1]")
            text = s.text
            href = s.get_attribute("href").rsplit("#", 1)[0] # drop in-page anchor
            return {"Subject": text, "Link": href, "Date": date}
        except:
            logger.warning('Unknown series page:\n{}'
                           .format(format_exception()))
            return None

    # A series is the root node (first thread) of one patch set's
    # threads.  The series URL is located by parsing the content of
    # the 'Thread overview' section.
    def __find_series_url(self):
        """Return the series URL found on the current thread page, ''
        when the current page already is the series page, or None when
        the page cannot be parsed."""
        logger.debug("Finding series URL from: {}".format(self.driver.current_url))
        try:
            # First find the 'Thread overview': the 3rd <pre> tag from
            # the end of the page body.
            pre = self.driver.find_elements(By.XPATH, "/html/body/pre")
            logger.debug("Thread page body <pre> tag number: {}"
                         .format(len(pre)))
            pre = pre[-3]
            # Locate the series URL relative to the 'Atom feed' node
            # of the thread overview.  The 'Atom feed' node always
            # exists and serves as the anchor:
            # if it is the last node, the current page is the series
            # page; if the node after it is 'top', the node after
            # 'top' is the series URL node; otherwise the node right
            # after 'Atom feed' is the series URL node.
            e = pre.find_elements(By.CSS_SELECTOR, '*');
            idx = [i for i, t in enumerate(e) if t.text == 'Atom feed'][-1]
            idx += 1
            if idx == len(e):
                # 'Atom feed' is the last node: this is the series page.
                return ''
            if e[idx].text == 'top':
                # The node after 'Atom feed' is 'top': the series URL
                # node is the one after that.
                idx += 1
            s = e[idx]
            # Try to read the series URL node as an <a> tag first.
            url = s.get_attribute("href")
            if not url is None:
                logger.debug("Found series URL: {}".format(url))
                return url.rsplit("#", 1)[0] # drop in-page anchor
            # Not an <a> tag: the series URL is in an <a> tag nested
            # inside a <b> tag.
            s = s.find_element(By.TAG_NAME, 'a')
            url = s.get_attribute("href")
            if not url is None:
                logger.debug("Found series URL: {}".format(url))
                return url.rsplit("#", 1)[0] # drop in-page anchor
            logger.warning("Not found series URL")
            return None
        except:
            # The thread page layout changed and cannot be parsed.
            logger.warning("Unknown thread page:\n{}"
                           .format(format_exception()))
            return None

    def __parse_thread(self, t):
        """Resolve thread *t* to its series info dict (or None),
        reusing already-parsed entries from self.series."""
        logger.debug("Parsing thread: {}".format(t))
        if t['href'] in self.series:
            logger.debug("Series exist: {}".format(t['href']))
            return self.series[t['href']]
        self.driver.get(t['href']) # open the thread page
        logger.debug("Read thread page finished: {}".format(t['href']))
        s = None
        url = self.__find_series_url()
        if url is None or len(url) == 0:
            # Read the series info from the current page.
            s = self.__read_series_info()
        else:
            if url in self.series:
                logger.debug("Series exist: {}".format(url))
                return self.series[url]
            logger.debug("Reading series page: {}".format(url))
            self.driver.get(url) # open the series page
            s = self.__read_series_info()
            if s is None:
                # Fall back to reading the series info from the
                # thread page itself.
                logger.debug("Reading series page: {}".format(t['href']))
                self.driver.get(t['href'])
                s = self.__read_series_info()
        logger.debug("Parse thread finished")
        return s

    def __save_series(self, s, c):
        """Record series *s* in self.series and, for every category
        name in *c*, in self.series_category."""
        if s is None: return
        self.series.setdefault(s['Link'], s)
        if c is None: return
        for k in c:
            if k in self.series_category:
                self.series_category[k].setdefault(s['Link'], s)
            else:
                self.series_category[k] = {s['Link']: s}

    def parse_series(self, filename):
        """Resolve every crawled thread to its series and classify it
        using the category JSON file *filename*.

        Fills self.series (Link -> series dict) and
        self.series_category (category name -> Link -> series dict).
        Stops early after more than 10 unexpected exceptions.
        """
        n = 0
        en = 0
        total = len(self.threads)
        step = round(total / 10)
        step = round(step + int(step == 0) * 0.6)
        progress = range(1, total + 1, step)
        self.series = {}
        self.series_category = {}
        self.category_records = {}
        has_category = self.__init_category_pattern(filename)
        logger.info('Parsing series from {} threads ...'.format(total))
        for t in self.threads:
            n += 1
            if n in progress:
                logger.info("Progress {}/{}".format(n, total))
            try:
                logger.debug("Thread {}/{}:".format(n, total))
                s = self.__parse_thread(t)
                if not s is None:
                    # Threads are listed newest first, so the first
                    # thread's date is the series' last-update date.
                    # It is set here because a thread read from
                    # another inbox carries no date of its own.
                    s.setdefault('Last', t['date'])
                    # The thread date is not stored here so that the
                    # Threads field can later be re-parsed with
                    # ast.literal_eval when reclassifying.
                    s.setdefault('Threads', []).append({'text': t['text'],
                                                        'href': t['href']})
                else:
                    logger.error('Parse {}/{} thread failed'
                                 .format(n, total))
                c = self.__classify_series(t, s) if has_category else None
                logger.debug("Category: {}, Series: {}".format(c, s))
                self.__save_series(s, c)
            except:
                en += 1
                logger.error("Parse thread {}/{} exception({}):\n{}"
                             .format(n, total, en, format_exception()))
                if en > 10:
                    logger.error('Too many exceptions, stop parsing series')
                    break
        logger.info('Parse series finished, {}/{} threads, {} exceptions'
                    .format(n, total, en))
        logger.info("Total series {}".format(len(self.series)))
        logger.debug('Series list: {}'.format(list(self.series.values())))
        logger.info("Total series categories {}".format(len(self.series_category)))
        logger.debug("Series categories: {}".format(self.series_category))

    def to_excel(self, filename):
        """Write all series to excel file *filename*: one sheet with
        everything plus one sheet per category, newest series first."""
        if filename is None or len(filename) == 0: return
        if len(self.series) == 0:
            logger.info('No series to save to excel file')
            return
        StyleFrame.A_FACTOR = 1
        StyleFrame.P_FACTOR = 1
        try:
            excel_writer = StyleFrame.ExcelWriter(filename)
            header = Styler(horizontal_alignment = 'left', bold = True)
            styler = Styler(horizontal_alignment = 'left',
                            date_time_format = 'YYYY-MM-DD HH:MM:SS')
            sc = [('all-series-' + self.start_time.date().isoformat(),
                   self.series)]
            sc += self.series_category.items()
            sheets = []
            for k, v in sc:
                df = pandas.DataFrame(v.values())
                df = df.sort_values(by = ['Last', 'Date', 'Subject'],
                                    ascending = [False, False, True])
                sf = StyleFrame(df, styler)
                sf.apply_headers_style(header)
                sf.set_column_width(['Subject'], 80)
                sf.set_column_width(['Link'], 60)
                sf.to_excel(excel_writer = excel_writer,
                            sheet_name = k,
                            columns_and_rows_to_freeze = 'B2',
                            best_fit = ['Date', 'Last', 'Threads'])
                sheets += [k]
            excel_writer.save()
        except:
            logger.error("Save series to excel file failed:\n{}"
                         .format(format_exception()))
            return
        logger.info('Save series to excel file "{}", sheets {}'
                    .format(filename, sheets))

# Script entry point: crawl the matching threads and export the series
# they belong to as an excel file.  Guarded so that importing this
# module (e.g. for testing) does not start a browser; the assignments
# below remain module globals, so `logger` stays visible to the
# functions above.
if __name__ == '__main__':
    args = parse_args()
    logger = init_logger(args.log)
    logger.info("Arguments: {}".format(vars(args)))
    crawler = LoreKernelOrgCrawler(args.browser)
    crawler.crawl_threads(args.inbox, args.queries,
                          args.max_threads, args.ignore_reply)
    crawler.parse_series(args.category)
    crawler.to_excel(args.output)
