#!/usr/bin/python
# -*- coding: utf-8 -*-

__author__ = 'David Zhang'

import logging
import os
import threading
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from queue import Queue, Full

from thrift.protocol import TCompactProtocol
from thrift.server import TServer
from thrift.transport import TSocket, TTransport

import config
from find_dep import FuncVulnQueryContext, FuncVulnContext
from rpc import VulnQueryService
from rpc.ttypes import AsyncQueryResult, QueryHistoryResult, VulnResult
from tools.codec.factory import CodecFactory
from tools.utils import SyncDict
from tools.file_helper import del_dir_tree, deprocess_filepath
from tools import parseutility as parser


def _handle_processed_filepaths(processed_filepaths: list) -> dict:
    """Map each processed filepath's uuid component back to its original filepath.

    Each entry is decoded with ``deprocess_filepath`` (which yields
    ``(filepath, basename, uuid)``); the resulting dict keys on the uuid.
    """
    return {
        uuid_: filepath
        for filepath, _basename, uuid_ in map(deprocess_filepath, processed_filepaths)
    }


class VulnQueryServiceImpl(VulnQueryService.Iface):
    """Implements the VulnQueryService RPC interface.

    Requests are handled asynchronously:

    1. ``async_query`` validates the request, allocates a key, registers a
       history record and enqueues the request, returning the key at once.
    2. A daemon worker thread dequeues requests one by one and runs the
       actual vulnerability query, updating the history record as it goes.
    3. Clients poll ``query_history`` with the key to observe progress and
       finally fetch the results.
    4. A daemon cleaner thread periodically evicts finished records (and
       deletes their temp directories) once they exceed the keep time.
    """

    def __init__(
            self,
            context: FuncVulnQueryContext,   # engine used to run the vulnerability queries
            max_waiting=10,     # max number of requests allowed to wait in the queue
            query_history_keep_time=60,     # seconds a finished query result is kept
            daemon_cleaner_scan_interval=30,    # seconds between two cleaner scans
            data_decompressor=None,  # decompressor for uploaded bytes; None -> auto-detecting one
    ):
        self._ctx = context
        # BUGFIX: the previous default (``CodecFactory.new_auto_decompressor()``
        # in the signature) was evaluated once at class-definition time and
        # shared by every instance; build it lazily per instance instead.
        self._data_decompressor = (
            data_decompressor if data_decompressor is not None
            else CodecFactory.new_auto_decompressor()
        )
        self._requests = Queue(maxsize=max_waiting)     # waiting queue of detection requests
        self._history = SyncDict()  # synchronized dict: query_key -> per-request state/result
        # Daemon thread that schedules the processing of queued requests.
        self._daemon_worker = threading.Thread(target=self.__daemon_worker_run, daemon=True)
        # Daemon thread that periodically cleans up expired query results.
        self._daemon_cleaner = threading.Thread(target=self.__daemon_cleaner_run, daemon=True)
        self._daemon_cleaner_scan_interval = daemon_cleaner_scan_interval
        self._query_history_keep_time = query_history_keep_time

        self._daemon_worker.start()
        self._daemon_cleaner.start()

    def __daemon_worker_run(self, *args, **kwargs):
        """Worker loop: take queued requests and process them sequentially."""
        while True:
            query_key, names, datas = self._requests.get()
            self.__async_query_src_files(query_key, names, datas)

    def __daemon_cleaner_run(self, *args, **kwargs):
        """Cleaner loop: evict finished history records past the keep time."""

        def cond_func(k: str, v: dict) -> bool:
            """Return True (delete the record) once a finished request has
            outlived the keep time; best-effort removes its temp dir too."""
            finish_time = v['finish_time']
            cur_time = int(time.time())
            if finish_time > 0 and cur_time - finish_time > self._query_history_keep_time:
                logging.info('The query history of key %s was deleted', k)
                tmp_dir = v['tmp_dir']
                logging.info('Deleted tmp dir %s', tmp_dir)
                try:
                    del_dir_tree(tmp_dir)
                except Exception as e:
                    # A leftover directory must not kill the cleaner thread.
                    logging.exception(e)
                return True
            return False

        while True:
            self._history.remove_if(cond_func)
            time.sleep(self._daemon_cleaner_scan_interval)

    def async_query(self, names: list, datas: list) -> AsyncQueryResult:
        """
        Submit a detection request asynchronously.
        @param names: names of all files to be checked
        @param datas: (compressed) file contents, parallel to ``names``
        @return: submission result carrying the allocated request key

        When the server receives a detection request:
        1. the input is validated and a request key is allocated;
        2. a history record is registered under that key, THEN the request is
           pushed to the waiting queue (registering first closes a race where
           the worker could dequeue the request before its record existed);
        3. if the queue is full the record is rolled back and ERROR returned,
           otherwise the key is returned so the client can poll progress.
        """
        try:
            if len(names) != len(datas):
                # BUGFIX: the message used to be passed lazy-logging style
                # (format string plus bare args), so it was never formatted.
                raise RuntimeError(
                    'The length of names(=%s) is not equal to datas(=%s).' % (len(names), len(datas)))

            if not datas:
                raise RuntimeError('The datas cannot be empty.')

            query_key = str(uuid.uuid4())
            logging.info('A new request entered, and allocated key %s', query_key)
            # Register the history record BEFORE enqueueing: the worker thread
            # may dequeue the request immediately and must find the key.
            self._history.put(
                query_key,
                {
                    'total_files': len(datas),
                    'current_processed_files': 0,
                    'query_rst': None,
                    'enter_time': int(time.time()),
                    'start_time': 0,
                    'finish_time': 0,
                    'status_code': 0,
                    'err_msg': '',
                    'tmp_dir': '',
                    'filepath_map': _handle_processed_filepaths(names),
                    'LoC': 0
                }
            )
            try:
                self._requests.put_nowait((query_key, names, datas))
            except Full:
                # The request was rejected: roll back the record created above.
                self._history.remove_if(lambda k, v: k == query_key)
                raise
            return AsyncQueryResult(status_code=0, key=query_key, err_msg='')
        except Full as e:
            logging.exception(e)
            return AsyncQueryResult(
                status_code=-1,
                key='',
                err_msg='Server is too busy(too many query requests are in line).'
            )
        except Exception as e:
            logging.exception(e)
            return AsyncQueryResult(
                status_code=-1,
                key='',
                err_msg=repr(e)
            )

    def __async_query_src_files(self, query_key: str, file_names: list, datas: list):
        """Run the actual vulnerability query for one request.

        Decompresses every payload into a per-request temp dir, feeds the
        files to the query context, maps result paths back to the original
        client-supplied paths and stores everything in the history record.
        """

        def update_start(k, v: dict) -> dict:
            v['start_time'] = int(time.time())
            v['status_code'] = 1    # running
            v['tmp_dir'] = dest_dir     # directory holding this request's temp files
            return v

        def update_finish(k, v: dict) -> dict:
            v['query_rst'] = ret_
            v['finish_time'] = int(time.time())
            v['status_code'] = 2
            v['LoC'] = test_lines_
            logging.info('Completed query for key %s, cost %ss.', k, v['finish_time'] - v['start_time'])
            return v

        def update_error(k, v: dict) -> dict:
            # BUGFIX: a trailing comma here used to store the tuple (None,).
            v['query_rst'] = None
            v['finish_time'] = int(time.time())
            v['status_code'] = -1
            v['err_msg'] = err_msg
            return v

        try:
            dest_dir = os.path.join(config.ROOT_PATH, 'tmp', query_key)
            # exist_ok replaces the old try/except-pass around makedirs.
            os.makedirs(dest_dir, exist_ok=True)
            self._history.compute(query_key, cpt_func=update_start)

            # Decompress every uploaded payload to a uniquely named temp file.
            files = []
            for processed_filepath, data in zip(file_names, datas):
                data = self._data_decompressor.decompress(data)
                uuid_ = deprocess_filepath(processed_filepath)[-1]
                fp = os.path.join(dest_dir, uuid_)
                with open(fp, 'wb') as f:
                    f.write(data)
                files.append(fp)

            def do_after_per_file(query_key_):
                # Progress callback: bump the processed-file counter per file.
                def func(k, v: dict):
                    v['current_processed_files'] += 1
                    return v
                self._history.compute(query_key_, func)

            test_func_num_, test_lines_, ret_ = self._ctx.query_files(
                files,
                do_after_per_file=do_after_per_file,
                do_after_per_file_func_args=(query_key,)
            )

            # Translate the temp-file paths inside the results back to the
            # original client-supplied file paths.
            filepath_map = self._history.get(query_key)['filepath_map']
            for vuln_rst in ret_:
                rst: VulnResult = vuln_rst
                rst.test_src_file = filepath_map[rst.test_src_file]
            self._history.compute(query_key, cpt_func=update_finish)
        except Exception as e:
            logging.exception(e)
            err_msg = repr(e)
            self._history.compute(query_key, cpt_func=update_error)

    def query_history(self, key: str):
        """
        Look up the detection state/result for a request key.
        @param key: request key previously returned by ``async_query``
        @return: QueryHistoryResult reflecting queue/running/finished/error state
        """
        if not self._history.contains(key):
            return QueryHistoryResult(
                status_code=-1,
                progress=0,
                err_msg='The request(key=%s) you queries not exists.' % key
            )
        # Record layout (created in async_query): total_files,
        # current_processed_files, query_rst, enter_time, start_time,
        # finish_time, status_code, err_msg, tmp_dir, filepath_map, LoC.
        d: dict = self._history.get(key)
        total_files = d['total_files']
        current_processed_files = d['current_processed_files']
        query_rst = d['query_rst']
        enter_time = d['enter_time']
        start_time = d['start_time']
        finish_time = d['finish_time']
        status_code = d['status_code']
        err_msg = d['err_msg']
        LoC = d['LoC']

        # While a phase is still in flight its end timestamp is 0, which makes
        # the difference negative — report the elapsed time so far instead.
        queuing_cost = start_time - enter_time
        queuing_cost = int(time.time()) - enter_time if queuing_cost < 0 else queuing_cost
        running_cost = finish_time - start_time
        running_cost = int(time.time()) - start_time if running_cost < 0 else running_cost

        if status_code == -1:
            # Failed
            return QueryHistoryResult(
                status_code=-1,
                progress=0,
                err_msg=err_msg,
                queuing_cost=queuing_cost,
                running_cost=running_cost
            )
        elif status_code == 0:
            # Still queuing
            return QueryHistoryResult(
                status_code=0,
                progress=0,
                err_msg='',
                queuing_cost=queuing_cost,
                running_cost=running_cost
            )
        elif status_code == 1:
            # Running: report percentage progress
            progress = current_processed_files * 100 / total_files
            return QueryHistoryResult(
                status_code=1,
                progress=progress,
                err_msg='',
                queuing_cost=queuing_cost,
                running_cost=running_cost
            )
        elif status_code == 2:
            # Finished: ship the results
            return QueryHistoryResult(
                status_code=2,
                progress=100,
                vuln_results=query_rst,
                LoC=LoC,
                err_msg='',
                queuing_cost=queuing_cost,
                running_cost=running_cost
            )
        else:
            return QueryHistoryResult(
                status_code=-1,
                progress=0,
                err_msg='Inner error, got an invalid status_code %s' % status_code,
                queuing_cost=queuing_cost,
                running_cost=running_cost
            )


def parse_args():
    """Parse the server's command-line options and sync them into ``config``.

    @return: the parsed argparse namespace (``abs_base_dir``, ``history_alive``)
    """
    import argparse

    arg_parser = argparse.ArgumentParser(description='The IoT firmwares vuln detection rpc-server.')
    arg_parser.add_argument(
        '--abs_base_dir', dest='abs_base_dir', default=config.ABS_BASE_DIR,
        help='Specifies the abs base dir, '
             'which is parent dir of abs dir storing all info of vuln funcs.')
    arg_parser.add_argument(
        '--history_alive', dest='history_alive', type=int, default=300,
        help='Specifies alive time for history of every request result.')

    parsed = arg_parser.parse_args()
    # Propagate the chosen base dir to the shared config module.
    config.ABS_BASE_DIR = parsed.abs_base_dir
    return parsed


def run(pool: ThreadPoolExecutor, args):
    """Bootstrap the query context, load the vuln features and serve RPC.

    @param pool: thread pool handed to the query context
    @param args: parsed command-line namespace (``abs_base_dir``, ``history_alive``)

    Blocks forever in ``server.serve()``; any startup/serve exception is
    logged at this top-level boundary instead of propagating.
    """
    try:
        logging.info('Welcomes to use the server of IoT Firmwares Vulnerabilities Detection System.')
        logging.info('Starting...')
        query_ctx = FuncVulnQueryContext(FuncVulnContext(), pool)

        logging.info('Starts to load all vuln func features.')
        feature_files = parser.load_abs_feature_files(os.path.join(args.abs_base_dir, 'abs'))
        if query_ctx.load_vuln_func_features(feature_files) == 0:
            logging.warning("The size of loaded func features is zero, "
                            "plz check whether the ABS_BASE_DIR you specified in config.py "
                            "or cmd-line-option --abs_base_dir is invalid.")
        logging.info('Done')

        service = VulnQueryServiceImpl(query_ctx, query_history_keep_time=args.history_alive)
        # Thread-pool based thrift server.
        # NOTE: py3 TCP sockets default to IPv6 here.
        server = TServer.TThreadPoolServer(
            VulnQueryService.Processor(service),
            TSocket.TServerSocket(config.RPC_SERVER_HOST, config.RPC_SERVER_PORT),
            TTransport.TFramedTransportFactory(),
            TCompactProtocol.TCompactProtocolFactory()
        )
        server.setNumThreads(config.RPC_SERVER_CONCURRENCY)

        logging.info('The server is serving...')
        server.serve()
    except Exception as e:
        logging.exception(e)


if __name__ == '__main__':
    config.conf_log()
    # The context manager guarantees the pool is shut down (equivalent to the
    # old try/finally with POOL.shutdown()) even if startup fails.
    with ThreadPoolExecutor() as POOL:
        run(POOL, parse_args())