#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Author: Xiangchen Li (xiangchen_li@ptm-biolab.com)
# Date: 2019-10-16 19:18:16
# Description: Automatic protein function annotation which contains:
# Gene Ontology (GO), COG/KOG, Domain, Subcellular location, KEGG and PPI annotation.
# In addition, meta-proteome annotation.
# FilePath: /Annotation_pipeline/protein_annotation.py
# Version: 1.3


import os
import re
import sys
import time
import magic
import shutil
import subprocess
import traceback
import logging.handlers
import matplotlib
from collections import defaultdict, Counter, OrderedDict
import pandas as pd
import numpy as np
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC, _verify_alphabet
from argparse import ArgumentParser
import xml.etree.ElementTree as XeT
from goatools import obo_parser
import multiprocessing as mp


def parse_cmdline():
    """
    Build the argument parser for the annotation pipeline and return the
    parsed command-line options.
    :return: argparse.Namespace with all command-line arguments
    """
    p = ArgumentParser(prog="protein_annotation.py")
    # NOTE(review): __version__ must be defined elsewhere in this module — confirm
    p.add_argument("--version", action="version",
                   version="%(prog)s " + __version__)
    p.add_argument("-i", dest="fasta", required=True, default=None, action="store",
                   help="Input protein sequences file [Homo_sapiens_9606_PR_20190513.fasta]")
    p.add_argument("-o", dest="output", default='Annotation', action="store",
                   help="Output directory [default Annotation]")
    p.add_argument("-f", "--force", dest="force", default=False, action="store_true",
                   help="Force new file overwriting")
    p.add_argument("-a", "--add", dest="add", default=False, action="store_true",
                   help="Try to add functional description to unknown proteins by using UniRef database")
    p.add_argument("--noclobber", dest="noclobber", default=False, action="store_true",
                   help="Do not nuke existing files")
    p.add_argument("-m", "--ms", dest="ms", default=None, action='store',
                   help="Input a MS file to just annotate the proteins in it [default None]")
    p.add_argument("-p", "--part", dest="part", default=None, nargs='+', action='store',
                   choices=['all', 'meta', 'virus', 'cog', 'go', 'domain', 'kegg', 'ppi', 'sub'],
                   help="Which part need to run? [default all]. "
                        "Currently, there were six annotation units in this pipeline: "
                        "1. cog; 2. go; 3. domain; 4. kegg; 5. ppi; 6. sub. "
                        "You can choose any of them to do functional annotations. For example: "
                        "If you want to do a complete proteome annotation, you can use [-p all] option. "
                        "If you just want to do the KEGG annotation, you can just select [-p kegg]. "
                        "Likewise, using [-p cog kegg ppi] for cog, kegg and ppi annotation. "
                        "In addition, you can use [-p meta] to do a Meta-proteome annotation job, or "
                        "[-p virus] to do a functional annotation for viral proteome.")
    p.add_argument("-t", "--threads", dest="threads", type=int, default=20, action="store",
                   help="How many threads will be used? No more than 40 [default 20].")
    p.add_argument("-k", "--kingdom", dest="kingdom", default=None, action="store",
                   choices=["animal", "plant", "fungi", "bacteria", "archaea", "protist"],
                   help="Define a (super)kingdom for KEGG and subcellular location annotation "
                        "[default animal]. WolfPsort: ['animal', 'plant', 'fungi']; "
                        "Psortb: ['bacteria', 'archaea']")
    p.add_argument("-g", "--gram", dest="gram", default=None, action="store",
                   choices=["p", "positive", "n", "negative"],
                   help="If you have chosen [-p] with [bacteria], "
                        "please define it as gram positive or negative. [default None]")
    p.add_argument("-v", dest="verbose", default=False, action="store_true",
                   help="Give verbose output.")
    return p.parse_args()


def last_exception():
    """Return the most recent exception's full traceback as one string for logging."""
    return ''.join(traceback.format_exception(*sys.exc_info()))


def pipeline_formation(part):
    """
    Translate the user's [-p] choices into the concrete list of annotation
    units to run. The umbrella modes all/meta/virus are mutually exclusive.
    :param part: list of [-p] choices
    :return: list of annotation unit names
    """
    mode_units = {
        'all': ['cog', 'go', 'domain', 'kegg', 'ppi', 'sub'],
        'meta': ['cog', 'go', 'domain', 'kegg'],
        'virus': ['go', 'domain'],
    }
    chosen_modes = [m for m in ('all', 'meta', 'virus') if m in part]
    if len(chosen_modes) > 1:
        logger.info("Two or all of [all, meta, virus] were selected, "
                    "but it is totally invalid!")
        sys.exit(1)
    # an umbrella mode expands to its unit list; otherwise run exactly what was asked
    pipeline_list = mode_units[chosen_modes[0]] if chosen_modes else part
    part_line = ', '.join(pipeline_list)
    logger.info("This pipeline will do [{}] annotation(s).".format(part_line))
    return pipeline_list


def parameters_validation(cpu):
    """
    Sanity-check the thread count and the kingdom/gram option combination
    before the pipeline starts; exits on invalid combinations.
    :param cpu: maximum allowed number of threads
    """
    # refuse thread counts that could overload the server
    if args.threads > cpu:
        logger.error('Please do not use more than [{}] CPUs. '
                     'Otherwise, the server will crash!'.format(str(cpu)))
        sys.exit(1)
    if 'meta' in args.part or 'virus' in args.part:
        # meta/virus pipelines ignore the kingdom option entirely
        if args.kingdom is not None:
            logger.warning('[-k {}] is noneffective in this functional meta-proteome '
                           'annotation pipeline'.format(args.kingdom))
        return
    if 'sub' not in args.part and 'kegg' not in args.part:
        return
    # sub/kegg annotation needs a kingdom; bacteria additionally needs a gram type
    if args.kingdom is None:
        logger.error('For Subcelluar localization or KEGG annotation, [-k] must be specified.')
        sys.exit(1)
    if args.kingdom in ['bacteria'] and args.gram is None:
        logger.error('You have chosen [-p bacteria], but the gram type is missing.')
        sys.exit(1)


def make_output_dir():
    """Make the output directory, if required.

    If the output directory already exists and args.force is not set True,
    stop with an error.

    If args.force is set ...
        If args.noclobber is not set True, delete the output directory tree;
        If args.noclobber is set True, use the existing output directory,
        and keep any existing output.
    """
    # full-pipeline modes always start clean; partial reruns keep old results
    args.noclobber = not any(m in args.part for m in ('all', 'meta', 'virus'))
    if os.path.exists(args.output):
        if not args.force:
            logger.error(f"Output directory [{args.output}] exists (exiting).")
            raise SystemExit(1)
        if args.noclobber:
            logger.warning(f"Reusing [{args.output}], NOCLOBBER and FORCE set.")
        else:
            # FORCE without NOCLOBBER: wipe the old tree
            logger.info(f"FORCE set, removing existing [{args.output}]")
            shutil.rmtree(args.output)
    os.makedirs(args.output, exist_ok=True)


def new_logger():
    """
    Create the console logger and the shared log-record formatter.
    :return: (logger, formatter) tuple
    """
    log = logging.getLogger('Start functional proteome annotation: %s' % time.asctime())
    log.setLevel(logging.DEBUG)
    # one formatter, reused later by the file handler
    fmt = logging.Formatter('[%(asctime)s] - [%(levelname)s] - %(message)s')
    console = logging.StreamHandler(sys.stderr)
    console.formatter = fmt
    # verbose mode surfaces INFO on the console; otherwise warnings and up only
    console.setLevel(logging.INFO if args.verbose else logging.WARNING)
    log.addHandler(console)
    return log, fmt


def make_log_file(log_file):
    """
    Attach a file handler to the global logger so everything is also written
    to the log file inside the output directory.
    :param log_file: path of the log file (log.txt)
    """
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(log_file, encoding='UTF-8',
                                  mode='a', delay=False)
    handler.formatter = formatter
    logger.addHandler(handler)


def not_empty(s):
    """Return a truthy value only when *s* is non-empty and not pure whitespace."""
    if not s:
        return s
    return s.strip()


def is_protein(seq):
    """
    Check that a sequence contains only the 20 common plus 6 rare amino acids.
    :param seq: protein sequence
    :return: True when the sequence is a valid protein sequence
    """
    # NOTE(review): Bio.Alphabet was removed in Biopython 1.78 — this needs
    # Biopython < 1.78; confirm the pinned version
    return _verify_alphabet(Seq(str(seq), IUPAC.ExtendedIUPACProtein))


def is_fasta(filename):
    """
    Check whether the input file is valid FASTA.
    :param filename: path of the protein sequence file
    :return: True when at least one FASTA record can be parsed
    """
    records = SeqIO.parse(filename, "fasta")
    # an empty iterator means the file was not FASTA at all
    return next(iter(records), None) is not None


def unknown_protein(description):
    """
    Decide whether a description line marks a protein of unknown function.
    :param description: protein description string (accession + free text)
    :return: (is_unknown, description with the leading accession stripped;
              '' when there is no text after the accession)
    """
    markers = ('characterized protein',
               'hypothetical protein',
               'nknown')  # matches both 'Unknown' and 'unknown'
    parts = str(description).strip().split(' ', maxsplit=1)
    if len(parts) != 2:
        # accession only, no functional text at all
        return True, ''
    flagged = any(marker in str(description) for marker in markers)
    return flagged, parts[1]


def good_annotation(text):
    """
    Decide whether a UniRef hit description carries real functional information.
    :param text: protein description string
    :return: False when the text matches any "uninformative" pattern
    """
    uninformative = (r'hypothetical', r'[Pp]redicted protein',
                     r'[Uu]ncharacterized protein', r'Os\d+g\d+ protein',
                     r'OSJNBa', r'WGS project', r'unnamed protein', r'[Ss]imilar to')
    return not any(re.search(pattern, text) for pattern in uninformative)


def calculate_percentage(item_num, all_num):
    """
    Return item_num as a percentage of all_num, rounded to one decimal place.

    :param item_num: number of annotated items
    :param all_num: total number of items
    :return: percentage as a float; 0.0 when all_num is 0
             (the original raised ZeroDivisionError on an empty protein set)
    """
    if not all_num:
        return 0.0
    return round(item_num / all_num * 100, 1)


def convert_encoding(file):
    """
    Normalize a text file to Unix line endings and UTF-8 encoding.

    :param file: path of the input protein sequence file
    :return: path of a UTF-8 encoded file (the input path itself when it is
             already utf-8/us-ascii)
    """
    # dos2unix: strip CR line endings in place
    dos2unix_cmd = 'dos2unix -q {}'.format(file)
    subprocess.call(dos2unix_cmd, shell=True)
    # detect the encoding of the whole file via libmagic
    with open(file, 'r') as src:
        blob = src.read()
    m = magic.Magic(mime_encoding=True)
    encoding = m.from_buffer(blob)
    if encoding in ['utf-8', 'us-ascii']:
        return file
    logger.warning('[{}] is not encoded as UTF-8 but will be converted.'.format(file))
    root, ext = os.path.splitext(file)
    # BUG FIX: suffix was misspelled '_uft-8'
    new_file = '{}_utf-8{}'.format(root, ext)
    # with-blocks ensure the converted copy is flushed and closed before the
    # caller reads it (the original never closed either handle)
    with open(file, 'r') as source, open(new_file, 'w', encoding='UTF-8') as target:
        target.write(source.read())
    return new_file


def convert_fasta(seq_file):
    """
    Normalize FASTA records from assorted sources into the form the search
    pipeline needs, and validate them.

    :param seq_file: path of the FASTA file
    :return: (OrderedDict of all records keyed by id,
              OrderedDict of unknown-function records keyed by id)
    Exits with an error when blank accessions or non-protein sequences occur.
    """
    record_dict = OrderedDict()
    unknown_record_dict = OrderedDict()
    invalid_titles = []
    invalid_proteins = []
    for i, seq_record in enumerate(SeqIO.parse(seq_file, 'fasta'), start=1):
        if not seq_record.id:
            # blank accession: remember the 1-based record position for the report
            invalid_titles.append(str(i))
        if is_protein(seq_record.seq) is False:
            invalid_proteins.append(seq_record.id)
        is_unknown, new_description = unknown_protein(seq_record.description)
        seq_record.description = new_description
        if is_unknown:
            unknown_record_dict[seq_record.id] = seq_record
        record_dict[seq_record.id] = seq_record
    if invalid_titles:
        # blank protein IDs are not allowed
        title_line = ', '.join(invalid_titles)
        logger.error('Detected some blank protein accessions, '
                     'please check sequence number(s): [{}].'.format(title_line))
        sys.exit(1)
    if invalid_proteins:
        # BUG FIX: accessions were joined with '' (unreadable run-together
        # message); use ', ' like the blank-accession report above
        accession_line = ', '.join(invalid_proteins)
        logger.error('Detected some invalid protein sequences, '
                     'please check these accession(s): [{}].'.format(accession_line.strip()))
        sys.exit(1)
    return record_dict, unknown_record_dict


def hmm_run(pfam_scan, pfam_db, f_list):
    """
    Run pfam_scan.pl for one split of the query sequences against Pfam.

    :param pfam_scan: path of pfam_scan.pl
    :param pfam_db: Pfam database directory
    :param f_list: [input fasta split path, output file path]
    """
    cmd = '{} -fasta {} -dir {} -cpu 2 > {}'.format(pfam_scan, f_list[0],
                                                    pfam_db, f_list[1])
    # subprocess.DEVNULL instead of an os.devnull handle the original never closed
    subprocess.call(cmd, shell=True, stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL)


def psortb_run(gram, seq_file, out_dir):
    """
    Run the locally installed psortb to predict subcellular localization for
    bacterial/archaeal proteins.

    :param gram: psortb organism flag ('p', 'n' or 'a')
    :param seq_file: path of the input fasta split
    :param out_dir: psortb output directory
    """
    cmd = 'psortb -i {} -{} -r {} -o terse'.format(seq_file, gram, out_dir)
    # subprocess.DEVNULL instead of an os.devnull handle the original never closed
    subprocess.call(cmd, shell=True, stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL)


def subcelluar_runner(parameters):
    """
    Run psortb over every fasta split in parallel, using half the requested
    threads (each psortb call effectively occupies two slots).

    :param parameters: [gram option or None, output directory, list of splits]
    """
    logger.info("Psortb running.")
    gram_opt, out_dir, seq_file_list = parameters
    if gram_opt in ('p', 'positive'):
        gram = 'p'
    elif gram_opt is not None:
        gram = 'n'
    else:
        # no gram type given: psortb's archaea mode
        gram = 'a'
    worker_count = int(int(args.threads) / 2)
    pool = mp.Pool(worker_count)
    for seq_file in seq_file_list:
        pool.apply_async(psortb_run, args=(gram, seq_file, out_dir))
    pool.close()
    pool.join()


def domain_runner(parameters):
    """
    Run pfam_scan.pl over every fasta split in parallel for domain annotation.

    :param parameters: [pfam_scan.pl path, Pfam db path, list of (in, out) pairs]
    """
    scan_bin, scan_db, split_files = parameters
    pool = mp.Pool(int(args.threads))
    for file_pair in split_files:
        pool.apply_async(hmm_run, args=(scan_bin, scan_db, file_pair))
    pool.close()
    pool.join()


def combine(a_list, b_list, n):
    """
    Merge two lists, de-duplicated, keeping first-occurrence order and
    truncating the result to at most n items.

    :param a_list: first list (no longer mutated — the original extended it
                   in place, surprising the caller)
    :param b_list: second list
    :param n: maximum length of the combined list
    :return: a new combined list
    """
    # OrderedDict.fromkeys de-duplicates while preserving first occurrence,
    # replacing the O(n^2) sort-by-index of the original
    merged = list(OrderedDict.fromkeys(list(a_list) + list(b_list)))
    return merged[:n]


class ProteinAnnotation:
    """
    Functional description, GO, COG/KOG, Domain, Subcellular location and KEGG annotation etc.
    """

    def __init__(self):
        """Collect CLI options and hard-wired tool/database paths for one run.

        Relies on the module-level `args` (parsed command line) and
        `pipeline` (list of annotation units) being set before instantiation.
        Creates the temp directory under the output path as a side effect.
        """
        # Initial parameters
        self.input = args.fasta
        self.part = pipeline
        # rerun is True only when neither COG nor GO was selected,
        # i.e. this run will not produce fresh eggNOG-mapper output
        self.rerun = False
        if 'cog' not in self.part:
            if 'go' not in self.part:
                self.rerun = True
        self.seq_records = None
        self.tax_status = False
        # default KOG (eukaryotes); switched to 'COG' later for prokaryotes/meta
        self.cog = 'KOG'
        self.cog_kog_db = '/home/DB/COG_KOG/'
        self.out_dir = args.output
        self.path = os.path.abspath(self.out_dir)
        self.threads = str(args.threads)
        self.ms = args.ms
        self.proteins = []
        self.tmp_dir = os.path.join(self.path, 'temp')
        os.makedirs(self.tmp_dir, exist_ok=True)
        # NOTE(review): all absolute DB/tool paths below are machine-specific
        # — confirm they match the deployment host
        self.tax_db = '/home/DB/egg_taxid.txt'
        self.tax_dict = defaultdict()
        self.kingdom_dict = defaultdict()
        self.cog_percent = 0.0
        self.go_percent = 0.0
        # Running parameters
        self.fasta = os.path.join(self.path, 'protein.fasta')
        # COG/KOG/GO parameters
        self.emapper = '/home/zyl/bin/eggNOG-mapper/emapper.py'
        self.eggnog_out = 'temp.emapper.annotations'
        self.ortho_out = 'temp.emapper.seed_orthologs'
        self.obofile = '/home/DB/GO/go-basic.obo'
        # Domain parameters
        self.pfam_scan = '/home/zyl/bin/PfamScan/PfamScan/pfam_scan.pl'
        self.xml = '/home/DB/PfamFamily.xml'
        self.mode = 'pfam'
        self.pfamscanout = ''
        self.pfamid2desc = None
        self.pfam_db = '/home/zyl/bin/PfamScan/PfamScan'
        self.tmp_file_list = []
        self.domain_percent = 0.0
        # KEGG parameters
        self.run_kegg = True
        self.kingdom = args.kingdom
        self.kegg_aln_file = ''
        self.kegg_db_dir = '/home/DB/KEGG/DIAMOND_DB'
        self.kegg_info_dir = '/home/DB/KEGG/Level_2_info'
        self.pathway_info = '/home/DB/KEGG/Pathway_info/KEGG_pathway_info.txt'
        self.ko_info = '/home/DB/KEGG/Pathway_info/KEGG_KO_info.txt'
        self.koio = '/home/zyl/bin/kegg_io.pl'
        self.ko_dict = defaultdict()
        self.pathway_dict = defaultdict()
        self.ko_pathway_dict = defaultdict(list)
        self.kegg_dict = defaultdict()
        self.kegg_percent = 0.0
        # PPI
        self.taxonomy = ''
        self.diamond = 'diamond'
        self.diamond_out = ''
        self.ppi_mapping = 'id_mapping.txt'
        self.string_db = '/FileDB/STRING_11.0'
        self.ppi_percent = 0.0
        # Subcelluar localization
        self.wolf = '/home/zyl/bin/WoLFPSort/bin/runWolfPsortSummary'
        self.gram = args.gram
        self.sub_dir = os.path.join(self.tmp_dir, 'psortb_out')
        self.sub_percent = 0.0
        # Statistics
        self.drawing = False
        self.result_dict = OrderedDict()
        # Add description (UniRef-based re-annotation of unknown proteins)
        self.unknown_records = None
        self.unknown_fasta = os.path.join(self.tmp_dir, 'unknown_proteins.fasta')
        self.uniref_db = '/home/DB/UniRef/uniref90.dmnd'
        self.aln_out = os.path.join(self.tmp_dir, 'UniRef_alignment.txt')
        self.description_stat = []

    def fetch_taxonomy(self):
        """
        Load the eggNOG taxid table into self.tax_dict (taxid -> name) and
        self.kingdom_dict (taxid -> superkingdom; rank 'virus' maps to 'Viruses').
        """
        with open(self.tax_db, 'r') as handle:
            for row_idx, line in enumerate(handle):
                if row_idx == 0:
                    # skip the header line
                    continue
                fields = line.strip().split('\t')
                tax_id, tax_name, rank = fields[0], fields[1], fields[2]
                if rank == 'virus':
                    kingdom_name = 'Viruses'
                else:
                    # third element of the comma-separated lineage column
                    kingdom_name = fields[3].split(',')[2]
                self.tax_dict[tax_id] = tax_name
                self.kingdom_dict[tax_id] = kingdom_name

    def fetch_sequences(self):
        """
        Restrict self.seq_records to the proteins listed in the MS file when
        one was given (warning about IDs missing from the FASTA); otherwise
        keep every record and list all protein IDs in self.proteins.
        """
        if not self.ms:
            self.proteins = list(self.seq_records.keys())
            return
        identified_records = {}
        missing_proteins = []
        with open(self.ms, 'r') as handle:
            # first line is the MS table header
            for line in handle.readlines()[1:]:
                prot_name = line.strip().split('\t')[0]
                self.proteins.append(prot_name)
                if prot_name in self.seq_records:
                    identified_records[prot_name] = self.seq_records[prot_name]
                else:
                    missing_proteins.append(prot_name)
        if missing_proteins:
            missing_line = ', '.join(missing_proteins)
            logger.warning('[{}] is/are not in the fasta file!'.format(missing_line))
        self.seq_records = identified_records

    def uniref_alignment(self):
        """
        Align the unknown proteins against UniRef90 with DIAMOND blastp and
        write a 5-column tab-separated report to self.aln_out.
        """
        cmd = '{} blastp -d {} -q {} -o {} -p {} ' \
              '-f 6 qseqid stitle pident evalue bitscore'.format(self.diamond,
                                                                 self.uniref_db,
                                                                 self.unknown_fasta,
                                                                 self.aln_out,
                                                                 str(self.threads))
        try:
            logger.info("running DIAMOND in [{}] threads.".format(str(self.threads)))
            # subprocess.DEVNULL instead of an os.devnull handle that was never closed
            subprocess.call(cmd, shell=True, stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
        except OSError:
            # NOTE(review): with shell=True a missing binary fails inside the
            # shell rather than raising OSError — this is mostly a safety net
            logger.info("Try to run [DIAMOND] but failed, please check.")
            logger.error(last_exception())
            sys.exit(1)

    def parse_description(self):
        """
        Parse the DIAMOND/UniRef report and, for the first (best) hit of each
        query whose description is informative, rewrite the query record's
        description as '(predicted) <UniRef description> [OS=.../OX=...]'.
        """
        if not (os.path.exists(self.aln_out) and os.path.getsize(self.aln_out)):
            logger.warning("[{}] does not exist or is empty, please check.".format(self.aln_out))
            return
        annotated_proteins = []
        with open(self.aln_out, 'r') as report:
            for line in report:
                fields = line.strip().split('\t')
                query_id = fields[0]
                if query_id in annotated_proteins:
                    # hits are ranked; only the first per query is used
                    continue
                hit_title = fields[1].split(' ', maxsplit=1)[1]
                # drop the trailing ' n=...' cluster-size suffix of UniRef titles
                ref_description = hit_title.split(' n=')[0]
                if not good_annotation(ref_description):
                    continue
                old_description = str(self.seq_records[query_id].description)
                # carry over the organism (OS=) or taxid (OX=) tail when present
                if 'OS=' in old_description:
                    info_line = 'OS={}'.format(old_description.split(' OS=')[1])
                    new_description = '(predicted) {} {}'.format(ref_description,
                                                                 info_line)
                elif 'OX=' in old_description:
                    info_line = 'OX={}'.format(old_description.split(' OX=')[1])
                    new_description = '(predicted) {} {}'.format(ref_description,
                                                                 info_line)
                else:
                    new_description = '(predicted) {}'.format(ref_description)
                self.seq_records[query_id].description = new_description
                annotated_proteins.append(query_id)

    def add_description(self):
        """
        Re-annotate proteins of unknown function by aligning them against the
        UniRef database and parsing the hit descriptions.
        """
        logger.info("Trying to annotate all unknown proteins from UniRef database.")
        if not any(self.unknown_records):
            logger.info('No unknown or predicted proteins need to be re-annotated.')
            return
        record_list = list(self.unknown_records.values())
        SeqIO.write(sequences=record_list, handle=self.unknown_fasta, format='fasta')
        self.uniref_alignment()
        self.parse_description()

    def sequence_preparation(self):
        """
        Validate and normalize the input file, extract the requested proteins,
        optionally re-annotate unknown ones, then write protein.fasta and the
        protein.txt ID list into the output directory.
        """
        self.input = convert_encoding(self.input)
        logger.info("Preparing protein sequences.")
        if not is_fasta(self.input):
            logger.error("[{}] is not a valid FASTA format file.".format(self.input))
            sys.exit(1)
        self.seq_records, self.unknown_records = convert_fasta(self.input)
        self.fetch_sequences()
        if args.add:
            self.add_description()
        final_records = []
        known_protein_num = 0
        unknown_protein_num = 0
        # count how many of the selected proteins still lack a real description
        for protein in self.proteins:
            record = self.seq_records[protein]
            is_unknown, _ = unknown_protein(record.description)
            if is_unknown:
                unknown_protein_num += 1
            else:
                known_protein_num += 1
            final_records.append(record)
        self.description_stat = [unknown_protein_num, known_protein_num]
        SeqIO.write(sequences=final_records, handle=self.fasta, format='fasta')
        proteins_out = os.path.join(self.path, 'protein.txt')
        with open(proteins_out, 'w') as out_handle:
            for prot in self.proteins:
                out_handle.write(prot + '\n')

    def eggnog_mapper(self):
        """
        Run eggNOG-mapper (DIAMOND mode) on protein.fasta inside the output
        directory, then stash the two result files into the temp directory.
        Relies on the module-level `working_dir` to restore the cwd afterwards.
        """
        os.chdir(self.path)
        cmd = 'python2 {} -i {} -m diamond --cpu {} -o temp ' \
              '--no_file_comments'.format(self.emapper, self.fasta, self.threads)
        try:
            logger.info("eggNOG-mapper version 2.0 running.")
            # subprocess.DEVNULL instead of an os.devnull handle that leaked
            subprocess.call(cmd, shell=True, stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
        except OSError:
            logger.info("Try to run [emapper.py] but failed, please check.")
            logger.error(last_exception())
            sys.exit(1)
        os.chdir(working_dir)
        # move temp.emapper.* from the output root into the temp directory
        for item in (self.eggnog_out, self.ortho_out):
            shutil.move(os.path.join(self.path, item),
                        os.path.join(self.tmp_dir, item))

    def path_exist(self, item):
        # 读取eggNOG输出结果
        path_1 = os.path.join(self.path, item)
        path_2 = os.path.join(self.tmp_dir, item)
        move = False
        eggnog_out_file = ''
        if os.path.exists(path_1):
            eggnog_out_file = path_1
            move = True
        elif os.path.exists(path_2):
            eggnog_out_file = path_2
        else:
            logger.warning('No previous file [{}], rerunning eggNOG.'.format(self.eggnog_out))
            self.eggnog_mapper()
        return move, eggnog_out_file

    def taxonomy_assignment(self):
        """
        Infer the closest reference taxonomy from the seed-ortholog hit counts
        and switch from KOG to COG for non-eukaryotes (meta runs always COG).
        """
        if 'meta' in args.part:
            self.cog = 'COG'
            return
        self.fetch_taxonomy()
        move, temp_ortho = self.path_exist(item=self.ortho_out)
        tax_list = []
        with open(temp_ortho, 'r') as handle:
            for line in handle:
                fields = line.strip('\r|\n').split('\t')
                # hit id looks like '<taxid>.<protein>'; keep the taxid part
                tax_list.append(fields[1].split('.')[0])
        # the most frequent reference taxid wins
        self.taxonomy = Counter(tax_list).most_common(1)[0][0]
        best_taxonomy = self.tax_dict[self.taxonomy]
        logger.info('The most closely referenced taxonomy is [{}: {}]'.format(self.taxonomy,
                                                                              best_taxonomy))
        super_kingdom = self.kingdom_dict[self.taxonomy]
        if super_kingdom not in ['Eukaryota', 'Viruses']:
            self.cog = 'COG'
            if self.kingdom not in ['bacteria', 'archaea']:
                logger.warning('Your query organism does not belong to [{}]. '
                               'Please check its COG/KOG annotation results.'.format(self.kingdom))

    def cog_filter(self, y):
        """
        Extract the COG/KOG id from an eggNOG OG string; keep it only when it
        is a KOG id or when the pipeline runs in COG mode, otherwise None.
        """
        # second-to-last '@'-separated field holds '...,<id>'
        og_field = y.split('@')[-2]
        cog_id = re.sub(r'.*,', '', og_field)
        if cog_id.startswith('KOG') or self.cog.startswith('COG'):
            return cog_id
        return None

    def move_temp_files(self, move):
        if move is True:
            temp_anno_file = os.path.join(self.path, self.eggnog_out)
            temp_ortho_file = os.path.join(self.path, self.ortho_out)
            bak_anno_file = os.path.join(self.tmp_dir, self.eggnog_out)
            bak_ortho_file = os.path.join(self.tmp_dir, self.ortho_out)
            shutil.move(temp_anno_file, bak_anno_file)
            shutil.move(temp_ortho_file, bak_ortho_file)

    def cog_annotation(self):
        """
        Parse COG/KOG assignments out of the eggNOG-mapper annotation table
        and write COG_annotation.xls (accession, category, id, description);
        records self.cog_percent as the annotated fraction.
        """
        logger.info("Fetching COG data from eggNOG-mapper results.")
        move, eggnog_out = self.path_exist(item=self.eggnog_out)
        eggnog = pd.read_csv(eggnog_out, sep='\t', header=None, low_memory=False)
        # COG/KOG id -> description lookup table
        cog_file = os.path.join(self.cog_kog_db, '{}.description.txt'.format(self.cog))
        id2description = pd.read_csv(cog_file, sep='\t', header=None, low_memory=False,
                                     names=[self.cog + ' NO.', self.cog + ' description'])
        # three columns: 1. protein accession 2. category 3. OG id
        df_cog = eggnog.iloc[:, [0, 20, 18]]
        df_cog.columns = ['Protein accession', self.cog + ' category', self.cog + ' NO.']
        df_cog = df_cog.copy(deep=True)
        no = self.cog + ' NO.'
        df_cog[no] = df_cog[no].apply(lambda x: self.cog_filter(x))
        df_cog = pd.merge(df_cog, id2description, on=self.cog + ' NO.', how='left')
        df_cog.dropna(how='any', inplace=True)
        cog_anno_file = os.path.join(self.path, 'COG_annotation.xls')
        protein_cog_num = df_cog.shape[0]
        self.cog_percent = calculate_percentage(protein_cog_num, len(self.proteins))
        df_cog.to_csv(cog_anno_file, sep='\t', index=False)
        # BUG FIX: the result files were moved twice — an inline copy of
        # move_temp_files ran first, then move_temp_files(move) again, which
        # raised on the already-moved files. Move them exactly once.
        self.move_temp_files(move)

    def go_annotation(self):
        """
        Parse GO assignments from the eggNOG-mapper annotation table and write
        GO_annotation.xls (one row per protein/GO term) plus the level-2
        summary GO_Terms_Level_2_annotation.xls. Requires goatools.
        """
        move, eggnog_out = self.path_exist(item=self.eggnog_out)
        logger.info("Fetching GO data from eggNOG-mapper results.")
        go_pd = pd.read_csv(eggnog_out, sep='\t', index_col=0,
                            header=None, low_memory=False)
        # column 5 holds the comma-separated GO id list per protein
        go_pd = go_pd.iloc[:, 5]
        go_pd.dropna(how='any', inplace=True)
        protein_go_num = go_pd.shape[0]
        self.go_percent = calculate_percentage(protein_go_num, len(self.proteins))
        # one python list of GO ids per protein
        go_pd = go_pd.str.split(',', expand=False)
        try:
            go_obo = obo_parser.GODag(self.obofile, load_obsolete=True, prt=None)
            all_go = obo_parser.OBOReader(self.obofile)
        except IOError:
            logger.error(
                "Can not load GO obo file: [{}], please check.".format(self.obofile)
            )
            logger.error(last_exception())
            sys.exit(1)
        # set membership is O(1); the original list made every lookup O(n)
        all_go_ids = {term.id for term in all_go}
        term_2_list = ['Protein accession', 'Biological Process',
                       'Cellular Component', 'Molecular Function']
        term_2_lines = '\t'.join(term_2_list) + '\n'
        go_anno_file = os.path.join(self.path, 'GO_annotation.xls')
        with open(go_anno_file, 'w') as f1:
            go_lines = ''
            # BUG FIX: Series.iteritems() was removed in pandas 2.0 — use
            # Series.items(), which behaves identically on older pandas too
            for protein, go_list in go_pd.items():
                term_dict = {'BP': [], 'CC': [], 'MF': []}
                for go_id in go_list:
                    if go_id not in all_go_ids:
                        continue
                    g = go_obo.query_term(go_id)
                    name_space = str(g.namespace)
                    go_level = str(g.level)
                    if go_level == '0':
                        # skip the three root terms
                        continue
                    go_term = str(g.name)
                    com = '{} {};'.format(str(go_id), go_term)
                    if name_space == 'biological_process':
                        name_space = 'Biological Process'
                        term_dict['BP'].append(com)
                    elif name_space == 'molecular_function':
                        name_space = 'Molecular Function'
                        term_dict['MF'].append(com)
                    else:
                        name_space = 'Cellular Component'
                        term_dict['CC'].append(com)
                    go_lines += '\t'.join([protein, name_space, str(go_id),
                                           go_level, go_term]) + '\n'
                bp_line = ' '.join(term_dict['BP'])
                cc_line = ' '.join(term_dict['CC'])
                mf_line = ' '.join(term_dict['MF'])
                term_2_lines += '{}\t{}\t{}\t{}\n'.format(protein, bp_line,
                                                          cc_line, mf_line)
            f1.write(go_lines)
        go_term_file = os.path.join(self.path, 'GO_Terms_Level_2_annotation.xls')
        with open(go_term_file, 'w') as f2:
            f2.write(term_2_lines)
        self.move_temp_files(move)

    def cog_go(self):
        """
        Run the COG/KOG and/or GO annotation stage, according to the
        requested pipeline parts in ``self.part``.
        """
        run_cog = 'cog' in self.part
        run_go = 'go' in self.part
        if run_cog:
            if run_go:
                logger.info("Start COG/KOG and GO annotation.")
            else:
                logger.info("Start COG annotation.")
            self.eggnog_mapper()
            self.taxonomy_assignment()
            self.cog_annotation()
            if run_go:
                self.go_annotation()
            self.tax_status = True
        elif run_go:
            logger.info("Start GO annotation.")
            self.eggnog_mapper()
            # Taxonomy is only needed here when PPI or KEGG runs later.
            if 'ppi' in self.part or 'kegg' in self.part:
                self.taxonomy_assignment()
                self.tax_status = True
            self.go_annotation()

    def seqs_separation(self):
        """
        Split the input protein FASTA into ``self.threads`` roughly equal
        sub-files so that the downstream tool (pfam_scan or psortb) can
        run in parallel.

        :return: a three-element parameter list for the worker pool:
            [pfam_scan path, pfam DB path, temp-file list] in 'pfam' mode,
            or [gram, sub_dir, temp-file list] otherwise.
        """
        part = int(self.threads)
        tmp_seqs_dir = os.path.join(self.tmp_dir, 'separate_seqs')
        os.makedirs(tmp_seqs_dir, exist_ok=True)
        input_fasta = os.path.join(self.path, self.fasta)
        # Re-wrap each record without its description so only the
        # accession is written to the sub-files.  (A plain list replaces
        # the previous OrderedDict keyed by consecutive integers.)
        records = [SeqRecord(seq=record.seq, id=record.id, description='')
                   for record in SeqIO.parse(input_fasta, 'fasta')]
        # Split record *indices*, not the records themselves: numpy would
        # otherwise try to expand SeqRecords into character arrays.
        div_groups = np.array_split(np.arange(len(records)), part)
        j = 0
        for group in div_groups:
            if len(group) > 0:
                group_seq_file = os.path.join(
                    tmp_seqs_dir, 'sub_fasta_{}.fasta'.format(j))
                if self.mode == 'pfam':
                    # pfam workers also need a per-chunk output file.
                    tmp_out_file = os.path.join(
                        tmp_seqs_dir, 'sub_{}_out_{}.txt'.format(self.mode, j))
                    self.tmp_file_list.append([group_seq_file, tmp_out_file])
                else:
                    self.tmp_file_list.append(group_seq_file)
                group_records = [records[seq_index] for seq_index in group]
                SeqIO.write(sequences=group_records, handle=group_seq_file, format='fasta')
                j += 1
        if self.mode == 'pfam':
            parameters = [self.pfam_scan, self.pfam_db, self.tmp_file_list]
        else:
            parameters = [self.gram, self.sub_dir, self.tmp_file_list]
        return parameters

    def parse_pfam(self):
        """
        Parse the Pfam family XML file (``self.xml``) and build
        ``self.pfamid2desc``, a DataFrame mapping each Pfam accession to
        its domain description.  Exits when the XML cannot be read.
        """
        # Plain dict: the previous bare defaultdict() had no factory and
        # behaved like (but read as more than) an ordinary dict.
        id2desc = {}
        try:
            logger.info("Fetching PFAM result.")
            tree = XeT.parse(self.xml)
            root = tree.getroot()
            for entry in root.findall('.//entry'):
                # 'acc' carries the Pfam accession (e.g. PF00001).
                id2desc[entry.attrib['acc']] = entry.find('description').text
            data = pd.Series(id2desc)
            self.pfamid2desc = pd.DataFrame({'Pfam ID': data.index,
                                             'Domain description': data.values})
        except IOError:
            logger.error(
                "[{0}] does not exist or is locked, please check".format(self.xml)
            )
            logger.error(last_exception())
            sys.exit(1)

    def domain_annotation(self):
        """
        Combine the per-chunk pfam_scan outputs and write the protein
        domain annotation table (Domain_annotation.xls); set
        ``self.domain_percent`` to the fraction of annotated proteins.
        """
        result_lines = ''
        for each_file in self.tmp_file_list:
            with open(each_file[1], 'r') as f1:
                # pfam_scan output begins with ~29 comment/header lines
                # per chunk; keep only the data rows.  TODO confirm the
                # offset matches the pfam_scan version in use.
                result_lines += ''.join(f1.readlines()[29:])
        self.pfamscanout = os.path.join(self.tmp_dir, 'combined_pfam_out.txt')
        with open(self.pfamscanout, 'w') as f2:
            f2.write(result_lines)
        prot2pfam = {}
        pfam2desc = {}
        # pfam_scan columns are whitespace separated; normalise to tabs.
        pfam_pd = pd.read_csv(self.pfamscanout, sep='\t', header=None)
        pfam_pd = pfam_pd.iloc[:, 0].apply(lambda x: '\t'.join(x.split()))
        pfam_pd = pfam_pd.str.split('\t', expand=True)
        # Column 7 is the hit type; keep 'Domain' hits only.
        pfam_pd = pfam_pd[pfam_pd.iloc[:, 7] == 'Domain']
        # Column 0: protein accession; column 5: versioned Pfam accession.
        pfam_pd = pfam_pd.iloc[:, [0, 5]]
        pfam_pd.columns = ['Protein accession', 'Pfam ID']
        # Drop the ".NN" version suffix from the Pfam accession.
        pfam_pd['Pfam ID'] = pfam_pd['Pfam ID'].apply(lambda x: x.split('.')[0])
        pfam_pd.drop_duplicates(keep='first', inplace=True)
        pfam_pd = pd.merge(pfam_pd, self.pfamid2desc, on='Pfam ID', how='left')
        pfam_pd.dropna(how='any', inplace=True)
        for index, row in pfam_pd.iterrows():
            prot2pfam.setdefault(row['Protein accession'], []).append(row['Pfam ID'])
            pfam2desc[row['Pfam ID']] = row['Domain description']
        domain_anno_file = os.path.join(self.path, 'Domain_annotation.xls')
        domain_result_lines = 'Protein accession\tPfam ID\tDomain description\n'
        with open(domain_anno_file, 'w') as o:
            for k, v in prot2pfam.items():
                v_line = '; '.join(v)
                p_line = '; '.join([pfam2desc[i] for i in v])
                domain_result_lines += '{}\t{}\t{}\n'.format(k, v_line, p_line)
            o.write(domain_result_lines)
        self.domain_percent = calculate_percentage(len(prot2pfam), len(self.proteins))
        # The combined output already lives inside self.tmp_dir; the old
        # shutil.move() of the file onto its own tmp_dir-joined path was a
        # no-op (or broke with a relative tmp_dir) and has been removed.

    def kegg_aln(self):
        """
        Align the query proteome against the merged KEGG reference
        proteins of the selected kingdom with DIAMOND blastp; the result
        path is stored in ``self.kegg_aln_file``.
        """
        self.kegg_aln_file = os.path.join(self.tmp_dir, 'KEGG_alignment_output.txt')
        reference_db = os.path.join(self.kegg_db_dir, self.kingdom)
        query_seq_file = os.path.join(self.path, self.fasta)
        aln_cmd = '{} blastp -d {} -q {} -p {} -o {}'.format(self.diamond,
                                                             reference_db,
                                                             query_seq_file,
                                                             self.threads,
                                                             self.kegg_aln_file)
        try:
            logger.info("DIAMOND (KEGG) is running in [{}] threads.".format(self.threads))
            # subprocess.DEVNULL: the previous open(os.devnull) handle was
            # never closed.
            subprocess.call(aln_cmd, shell=True,
                            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        except OSError:
            # Log failures at error level, matching the sibling methods.
            logger.error("Try to run [{}] but failed, please check.".format(self.diamond))
            logger.error(last_exception())
            sys.exit(1)

    def load_kegg_pathway(self):
        """
        Load the KO and pathway reference tables into ``self.ko_dict``,
        ``self.pathway_dict`` and ``self.ko_pathway_dict``, keeping only
        pathways that apply to the selected kingdom(s).  Exits when
        either reference file is missing.
        """
        if not os.path.exists(self.ko_info):
            logger.error('[{}] does not exist or is locked, '
                         'please check.'.format(self.ko_info))
            sys.exit(1)
        with open(self.ko_info, 'r') as f:
            # Skip the header; columns are: KO id, KO annotation.
            for each_line in f.readlines()[1:]:
                f_list = each_line.strip().split('\t')
                self.ko_dict[f_list[0]] = f_list[1]
        if not os.path.exists(self.pathway_info):
            logger.error('[{}] does not exist or is locked, '
                         'please check.'.format(self.pathway_info))
            sys.exit(1)
        # NOTE(review): reads the raw command-line parts (module-level
        # ``args``), not self.part, so 'meta' is checked before any
        # pipeline expansion -- confirm this is intended.
        if 'meta' in args.part:
            kingdom_list = ['fungi', 'bacteria']
        else:
            kingdom_list = [self.kingdom]
        with open(self.pathway_info, 'r') as k:
            for each_line in k.readlines()[1:]:
                p_list = each_line.strip().split('\t')
                # KO id, e.g. K00001
                ko_id = p_list[2]
                # Pathway ID, e.g. map00010
                path_id = p_list[0].split(',')[1]
                # Pathway classification
                path_class = p_list[-2]
                # Kingdoms this pathway applies to
                organism_level = p_list[-1].split(',')
                # Keep the pathway only if it matches a wanted kingdom.
                if any(each_kingdom in organism_level
                       for each_kingdom in kingdom_list):
                    path_annotation = p_list[1]
                    self.pathway_dict[path_id] = [path_annotation, path_class]
                    self.ko_pathway_dict[ko_id].append(path_id)

    def parse_kegg(self):
        """
        Parse the DIAMOND alignment against the KEGG reference and fill
        ``self.kegg_dict`` with, per query protein:
        [protein, KO id, KEGG gene, pathway line, classification line].
        Only the first reference hit that maps to a KO is kept.
        """
        level_ko_file = os.path.join(self.kegg_info_dir,
                                     '{}.txt'.format(self.kingdom))
        if not os.path.exists(level_ko_file):
            logger.error('[{}] does not exist or is locked, '
                         'please check.'.format(level_ko_file))
            sys.exit(1)
        # Reference protein id -> KO id (plain dict; the previous bare
        # defaultdict() had no factory).
        ko_dict = {}
        with open(level_ko_file, 'r') as f:
            for k_line in f.readlines()[1:]:
                a_list = k_line.strip().split('\t')
                ko_dict[a_list[0]] = a_list[1]
        # Query protein -> reference hits, in alignment order.
        result_dict = defaultdict(list)
        with open(self.kegg_aln_file, 'r') as g:
            # NOTE(review): [1:] drops the first alignment row, but DIAMOND
            # tabular output has no header line -- confirm this is intended.
            for each_line in g.readlines()[1:]:
                b_list = each_line.strip().split('\t')
                result_dict[b_list[0]].append(b_list[1])
        for query_protein, ref_list in result_dict.items():
            for ref in ref_list:
                if ref not in ko_dict:
                    continue
                ko_id = ko_dict[ref]
                kegg_gene = self.ko_dict.get(ko_id, '')
                pathway_line = ''
                classification_line = ''
                if ko_id in self.ko_pathway_dict:
                    class_set = set()
                    map_list = []
                    for pathway_id in self.ko_pathway_dict[ko_id]:
                        # pathway_dict values are [description, class].
                        pathway_annotation, pathway_classification = \
                            self.pathway_dict[pathway_id]
                        class_set.add(pathway_classification)
                        map_list.append('{} {}'.format(pathway_id, pathway_annotation))
                    pathway_line = '; '.join(map_list)
                    classification_line = ', '.join(sorted(class_set))
                self.kegg_dict[query_protein] = [query_protein, ko_id, kegg_gene,
                                                 pathway_line, classification_line]
                # Keep only the first hit with a KO mapping.
                break

    def kegg_meta(self):
        """
        Pull KEGG KO assignments out of the eggNOG-mapper output (used
        for meta-proteome KEGG pathway annotation) and fill
        ``self.kegg_dict`` with [protein, KO id, KEGG gene, pathway line].

        NOTE(review): entries here have 4 fields while write_out()'s
        header has 5 columns -- confirm the missing classification column
        is acceptable for the meta output.
        """
        logger.info("Fetching KEGG data from eggNOG-mapper results.")
        # Locate (and possibly restore) the eggNOG-mapper output file.
        move, eggnog_out = self.path_exist(item=self.eggnog_out)
        with open(eggnog_out, 'r') as f:
            for each_line in f.readlines():
                # strip('\r\n'): the old strip('\r|\n') also removed '|'.
                a_list = each_line.strip('\r\n').split('\t')
                query_protein = a_list[0]
                # Column 8 holds comma-separated KO ids ('ko:K00001,...').
                if not a_list[8]:
                    continue
                ko_ids = [tmp_ko.strip().replace('ko:', '')
                          for tmp_ko in str(a_list[8]).split(',')]
                # Prefer the first KO that maps to a pathway; otherwise
                # fall back to the first KO.  (The old code could read an
                # undefined or stale 'good_ko' when several KOs were
                # listed and none had a pathway.)
                good_ko = ko_ids[0]
                is_pathway = len(ko_ids) == 1
                for new_ko in ko_ids:
                    if new_ko in self.ko_pathway_dict:
                        good_ko = new_ko
                        is_pathway = True
                        break
                kegg_gene = self.ko_dict.get(good_ko, '')
                pathway_line = ''
                if is_pathway:
                    # pathway_dict values are [description, class]; the old
                    # code formatted the whole list into the output line.
                    pathway_line = '; '.join(
                        '{} {}'.format(map_id, self.pathway_dict[map_id][0])
                        for map_id in self.ko_pathway_dict[good_ko])
                self.kegg_dict[query_protein] = [query_protein, good_ko, kegg_gene,
                                                 pathway_line]
        self.move_temp_files(move)

    def write_out(self):
        """
        Write KEGG_pathway_annotation.xls with one row per input protein
        and run the koio Perl helper to produce kolist.txt / pathway.txt.
        """
        logger.info("Generating KEGG_pathway_annotation.xls.")
        kegg_anno_file = os.path.join(self.path, 'KEGG_pathway_annotation.xls')
        result_lines = 'Protein accession\tKEGG KO No.\tKEGG Gene\t' \
                       'KEGG pathway\tPathway classification\n'
        protein_kegg_num = 0
        for protein in self.proteins:
            if protein in self.kegg_dict:
                kegg_list = self.kegg_dict[protein]
                protein_kegg_num += 1
            else:
                # Unannotated proteins still get an (empty) row.
                kegg_list = [protein, '', '', '', '']
            result_lines += '\t'.join(kegg_list) + '\n'
        self.kegg_percent = calculate_percentage(protein_kegg_num, len(self.proteins))
        with open(kegg_anno_file, 'w') as f:
            f.write(result_lines)
        os.chdir(self.path)
        try:
            logger.info("Generating [kolist.txt] & [pathway.txt].")
            cmd = 'perl {} {}'.format(self.koio, kegg_anno_file)
            # subprocess.DEVNULL: the previous open(os.devnull) handle was
            # never closed.
            subprocess.call(cmd, shell=True,
                            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        except OSError:
            logger.error("Try to run [{}] but failed, please check.".format(self.koio))
            sys.exit(1)
        os.chdir(working_dir)

    def kegg(self):
        """
        Drive the KEGG pathway annotation step when requested.
        """
        if 'kegg' not in self.part:
            return
        logger.info("Start KEGG annotation.")
        self.load_kegg_pathway()
        # Meta-proteomes take KO assignments from eggNOG-mapper output;
        # everything else aligns against the KEGG reference first.
        # (Checks the raw command-line parts via module-level ``args``.)
        if 'meta' in args.part:
            self.kegg_meta()
        else:
            self.kegg_aln()
            self.parse_kegg()
        self.write_out()

    def string_copy(self):
        """
        Copy the STRING links and sequence files of the assigned
        taxonomy into the working area for the PPI alignment.

        :return: path of the copied STRING sequences FASTA file.
        """
        db_links_dir = os.path.join(self.string_db, 'links')
        db_links_file = os.path.join(db_links_dir, '{}.txt'.format(self.taxonomy))
        tmp_links_file = os.path.join(self.path, 'protein.links.txt')
        try:
            shutil.copy(db_links_file, tmp_links_file)
        except IOError:
            # Fixed garbled message (was "does existed or is locked").
            logger.error("[{}] does not exist or is locked, please check".format(db_links_file))
            sys.exit(1)
        db_seqs_dir = os.path.join(self.string_db, 'sequences')
        db_seqs_file = os.path.join(db_seqs_dir, '{}.fasta'.format(self.taxonomy))
        tmp_seqs_file = os.path.join(self.tmp_dir, '{}.ppi.fasta'.format(self.taxonomy))
        try:
            shutil.copy(db_seqs_file, tmp_seqs_file)
        except IOError:
            # Fixed grammar (was "does not existed").
            logger.error("[{}] does not exist or is locked, please check".format(db_seqs_file))
            sys.exit(1)
        return tmp_seqs_file

    def string_aln(self):
        """
        Build a DIAMOND database from the copied STRING sequences and
        align the query proteome against it (best hit only); the result
        path is stored in ``self.diamond_out``.
        """
        os.chdir(self.tmp_dir)
        db_cmd = '{0} makedb --in {1}.ppi.fasta -d {1}'.format(self.diamond, self.taxonomy)
        self.diamond_out = os.path.join(self.tmp_dir, 'diamond.txt')
        input_fasta = os.path.join(self.path, self.fasta)
        aln_cmd = '{} blastp -d {} -q {} -p {} --max-target-seqs 1 ' \
                  '-o diamond.txt'.format(self.diamond, self.taxonomy,
                                          input_fasta, self.threads)
        try:
            logger.info("DIAMOND (PPI) is running in [{}] threads.".format(str(self.threads)))
            # subprocess.DEVNULL: the previous open(os.devnull) handle was
            # never closed.
            subprocess.call(db_cmd, shell=True,
                            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            subprocess.call(aln_cmd, shell=True,
                            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        except OSError:
            logger.error("Try to run [{}] but failed, please check.".format(self.diamond))
            sys.exit(1)
        # Remove the temporary DIAMOND database, keep the alignment.
        os.remove('{}.dmnd'.format(self.taxonomy))
        os.chdir(working_dir)

    def parse_ppi(self):
        """
        Parse the DIAMOND best-hit output and write the PPI mapping file
        (query, hit, identity and the last score column per row).
        """
        ppi_anno_file = os.path.join(self.path, self.ppi_mapping)
        out_rows = []
        with open(self.diamond_out, 'r') as handle:
            for raw_line in handle:
                columns = raw_line.strip().split('\t')
                out_rows.append('\t'.join(columns[0:3]) + '\t' + columns[-1] + '\n')
        # One alignment row per mapped protein (--max-target-seqs 1).
        self.ppi_percent = calculate_percentage(len(out_rows), len(self.proteins))
        with open(ppi_anno_file, 'w') as out_handle:
            out_handle.write(''.join(out_rows))

    def ppi(self):
        """
        Drive the protein-protein interaction (PPI) annotation step.
        """
        if 'ppi' not in self.part:
            return
        logger.info("Start PPI annotation.")
        # Assign the taxonomy first if no earlier stage has done so.
        if self.tax_status is False:
            self.taxonomy_assignment()
            self.tax_status = True
        self.string_copy()
        self.string_aln()
        self.parse_ppi()

    def sub_prediction(self):
        """
        Decide which subcellular-localization predictor to use:
        WoLF PSORT for eukaryotes, PSORTb for prokaryotes.

        :return: the predictor name, 'wolf_psort' or 'psortb'.
        """
        self.mode = 'sub'
        eukaryote_kingdoms = ('animal', 'plant', 'fungi', 'protist')
        if self.kingdom in eukaryote_kingdoms:
            return 'wolf_psort'
        # Prokaryotes: start PSORTb from a clean output directory.
        if os.path.exists(self.sub_dir):
            shutil.rmtree(self.sub_dir)
        os.makedirs(self.sub_dir)
        return 'psortb'

    def parse_psortb(self):
        """
        Parse the PSORTb result files under ``self.sub_dir`` and write
        Subcellular_location_annotation.xls with one row per input
        protein ('Unknown' when no prediction exists).
        """
        result_dict = {}
        for root, dirs, files in os.walk(self.sub_dir):
            for each_file in files:
                # NOTE(review): loose filter -- matches any filename that
                # merely contains 'txt'; confirm '.txt' suffix is enough.
                if 'txt' in each_file:
                    # Join with the directory being walked (was joined
                    # with self.sub_dir, which broke for files found in
                    # nested subdirectories).
                    f_path = os.path.join(root, each_file)
                    with open(f_path, 'r') as f:
                        for each_line in f.readlines()[1:]:
                            s_list = each_line.strip().split('\t')
                            if len(s_list) > 1:
                                result_dict[s_list[0].strip()] = s_list[1]
        sub_anno_file = os.path.join(self.path, 'Subcellular_location_annotation.xls')
        result_lines = 'Protein accession\tSubcellular localization\n'
        protein_sub_num = 0
        with open(sub_anno_file, 'w') as o:
            for each_protein in self.proteins:
                p_location = result_dict.get(each_protein, 'Unknown')
                if p_location != 'Unknown':
                    protein_sub_num += 1
                result_lines += '{}\t{}\n'.format(each_protein, p_location)
            o.write(result_lines)
        self.sub_percent = calculate_percentage(protein_sub_num, len(self.proteins))

    def wolf_psort(self):
        """
        Run WoLF PSORT on the input FASTA for eukaryotes and count how
        many proteins received a localization row.
        """
        sub_anno_file = os.path.join(self.path, 'Subcellular_location_annotation.xls')
        input_fasta = os.path.join(self.path, self.fasta)
        if self.kingdom == 'protist':
            # WoLF PSORT has no protist model; fall back to 'animal'.
            # (Note: this intentionally mutates self.kingdom.)
            self.kingdom = 'animal'
        cmd = '{} {} < {} > {}'.format(self.wolf, self.kingdom,
                                       input_fasta, sub_anno_file)
        try:
            logger.info("WoLFPSort running.")
            # subprocess.DEVNULL: the previous open(os.devnull) handle was
            # never closed.
            subprocess.call(cmd, shell=True,
                            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        except OSError:
            logger.error("Try to run [{}] but failed, please check.".format(self.wolf))
            logger.error(last_exception())
            sys.exit(1)
        protein_sub_num = 0
        if os.path.getsize(sub_anno_file) > 0:
            with open(sub_anno_file, 'r') as o:
                # First line is a header; every other line is a prediction.
                protein_sub_num = len(o.readlines()[1:])
        self.sub_percent = calculate_percentage(protein_sub_num, len(self.proteins))

    def read_results(self):
        """
        Re-read every annotation output file and recompute the annotated
        percentage per database, filling ``self.result_dict`` with
        {key: [display name, percentage]} entries.
        """
        def _data_lines(file_path, skip_header=True):
            # Return the data lines of file_path, or [] when the file is
            # missing or empty.
            if os.path.exists(file_path) and os.path.getsize(file_path) > 0:
                with open(file_path, 'r') as handle:
                    lines = handle.readlines()
                return lines[1:] if skip_header else lines
            return []

        # COG: one data row per annotated protein.
        cog_anno_file = os.path.join(self.path, 'COG_annotation.xls')
        self.cog_percent = calculate_percentage(
            len(_data_lines(cog_anno_file)), len(self.proteins))
        self.result_dict['cog'] = [self.cog, self.cog_percent]
        # GO: count distinct proteins; the file has no header row.
        go_anno_file = os.path.join(self.path, 'GO_annotation.xls')
        go_proteins = {each_line.strip('\r|\n').split('\t')[0]
                       for each_line in _data_lines(go_anno_file, skip_header=False)}
        self.go_percent = calculate_percentage(len(go_proteins), len(self.proteins))
        self.result_dict['go'] = ['GO', self.go_percent]
        # Domain: one data row per annotated protein.
        domain_anno_file = os.path.join(self.path, 'Domain_annotation.xls')
        self.domain_percent = calculate_percentage(
            len(_data_lines(domain_anno_file)), len(self.proteins))
        self.result_dict['domain'] = ['Domain', self.domain_percent]
        # KEGG: count rows whose KO column (index 1) is non-empty.
        kegg_anno_file = os.path.join(self.path, 'KEGG_pathway_annotation.xls')
        protein_kegg_num = sum(
            1 for each_line in _data_lines(kegg_anno_file)
            if each_line.strip('\r|\n').split('\t')[1])
        self.kegg_percent = calculate_percentage(protein_kegg_num, len(self.proteins))
        self.result_dict['kegg'] = ['KEGG', self.kegg_percent]
        # PPI: one data row per mapped protein.
        ppi_anno_file = os.path.join(self.path, self.ppi_mapping)
        self.ppi_percent = calculate_percentage(
            len(_data_lines(ppi_anno_file)), len(self.proteins))
        self.result_dict['ppi'] = ['PPI', self.ppi_percent]
        # Subcellular: count rows whose location is not 'Unknown'.
        subcelluar_anno_file = os.path.join(self.path, 'Subcellular_location_annotation.xls')
        protein_sub_num = sum(
            1 for each_line in _data_lines(subcelluar_anno_file)
            if each_line.strip().split('\t')[1] != 'Unknown')
        self.sub_percent = calculate_percentage(protein_sub_num, len(self.proteins))
        self.result_dict['sub'] = ['Subcelluar', self.sub_percent]

    def annotation_statistics(self):
        """
        Write the per-database statistics table and draw the summary
        figure: a known/unknown pie chart plus a per-database bar chart.
        """
        real_time = time.strftime('%Y%m%d_%H%M', time.localtime(time.time()))
        statistics_file = os.path.join(self.out_dir,
                                       'Statistics_{}.txt'.format(real_time))
        result_picture = os.path.join(self.out_dir,
                                      'Statistics_{}.png'.format(real_time))
        if 'meta' in args.part or 'all' in args.part or 'virus' in args.part:
            # All stages ran in-process: the percentages are in memory.
            values = [self.cog_percent, self.go_percent, self.domain_percent,
                      self.kegg_percent, self.ppi_percent, self.sub_percent]
            tmp_items = ['cog', 'go', 'domain', 'kegg', 'ppi', 'sub']
            items = [self.cog, 'GO', 'Domain', 'KEGG', 'PPI', 'Subcelluar']
            # Fixed: 'each_item.index(each_item)' always evaluated to 0,
            # so every database was recorded with the first value.
            for item_name, item_label, item_value in zip(tmp_items, items, values):
                self.result_dict[item_name] = [item_label, item_value]
        else:
            logger.info('Loading all previous results.')
            self.read_results()
            values = []
            items = []
            for item in self.result_dict:
                items.append(self.result_dict[item][0])
                values.append(self.result_dict[item][1])
        with open(statistics_file, 'w') as o:
            o.write('Database\tPercentage\n')
            for key, value in self.result_dict.items():
                o.write('{}\t{}\n'.format(value[0], value[1]))
        logger.info('Drawing the proteome annotation statistics plots.')
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        plt.figure(figsize=(10, 5))
        # Pie chart: known vs unknown protein functions.
        plt.subplot(1, 2, 1)
        # Fixed: the pie labels used to overwrite 'items', mislabelling
        # the bar chart's x axis with ['Unknown', 'Known'].
        pie_labels = ['Unknown', 'Known']
        plt.pie(self.description_stat, labels=pie_labels, autopct='%1.1f%%',
                startangle=90, colors=('#E8E8E8', '#87CEFA'))
        plt.title('Pie chart for proteome functions')
        # Bar chart: percentage annotated per database.
        plt.subplot(1, 2, 2)
        n = len(values)
        index = np.arange(n)
        width = 0.4
        bars = plt.bar(index, values, width, color='#87CEFA')
        # y axis covers 0-100%.
        plt.yticks(np.arange(0, 110, 10))
        plt.xlabel('Functional descriptor')
        plt.ylabel('Percentage of proteome annotated')
        plt.title('Functional annotation statistics')
        plt.xticks(index, items)
        # Print each percentage just above its bar.
        for bar in bars:
            height = bar.get_height()
            bar_center = bar.get_x() + bar.get_width() / 2
            plt.text(bar_center, height * 1.005,
                     str(height), ha='center', va='bottom')
        plt.tight_layout()
        plt.savefig(result_picture, dpi=300)


def domain(part):
    """
    Run the Pfam domain annotation stage when requested in *part*.
    """
    if 'domain' not in part:
        return
    logger.info("Start domain annotation.")
    domain_runner(P.seqs_separation())
    P.parse_pfam()
    P.domain_annotation()


def subcellular(part):
    """
    Run the subcellular localization stage when requested in *part*:
    WoLF PSORT for eukaryotes, PSORTb result parsing for prokaryotes.
    """
    if 'sub' not in part:
        return
    logger.info("Start subcelluar localization annotation.")
    if P.sub_prediction() == 'wolf_psort':
        P.wolf_psort()
    else:
        subcelluar_runner(P.seqs_separation())
        P.parse_psortb()


def main_runner(pipe):
    """
    Execute the whole annotation pipeline on *pipe*, stage by stage.
    """
    steps = (
        pipe.sequence_preparation,
        pipe.cog_go,
        lambda: domain(pipe.part),
        pipe.kegg,
        pipe.ppi,
        lambda: subcellular(pipe.part),
        pipe.annotation_statistics,
    )
    for step in steps:
        step()


if __name__ == '__main__':
    # Run as a script
    __version__ = '1.5'
    # Parse command-line
    start_time = time.time()
    args = parse_cmdline()
    working_dir = os.getcwd()
    # Set up logging into a temporary file in the working directory.
    logger, formatter = new_logger()
    tmp_log_file = os.path.join(working_dir, 'log.txt')
    make_log_file(tmp_log_file)
    # Report arguments, if verbose.
    logger.info('Command line: [%s]' % ' '.join(sys.argv))
    logger.info(args)
    if args.part is None:
        args.part = ['all']
    pipeline = pipeline_formation(args.part)
    logger.info('Output directory: [%s].' % args.output)
    # Validate parameters
    parameters_validation(args.threads)
    # Make a output directory safely.
    make_output_dir()
    # Run main process.
    P = ProteinAnnotation()
    main_runner(P)
    # Report that we've finished.
    end_time = time.time()
    logger.info("All jobs have been done: %s." % time.asctime())
    logger.info("Total time taken: %.2fs." % (end_time - start_time))
    # Append the temporary log to the output log in pure Python instead
    # of shelling out to 'cat' (non-portable, ignored failures).
    new_log_file = os.path.join(args.output, 'log.txt')
    with open(tmp_log_file, 'r') as src, open(new_log_file, 'a') as dst:
        shutil.copyfileobj(src, dst)
    os.remove(tmp_log_file)
    # Best wishes~
