#!/usr/bin/python3
import csv
import ast
import json
import re
import os
import shutil
import pprint
import signal
import hashlib
import time
from concurrent.futures import ThreadPoolExecutor
from queue import Queue
from collections import deque
from threading import Thread
from typing import Iterable
from bs4 import BeautifulSoup
import requests
from .bugzilla import Api
from .constant import (
    URL_REGEX,
    BUGZILLA_HOST,
    VERSION_REGEX,
    DEFAULT_SAVE_PATH,
    NVD_HOST,
)
from .logger import logger, get_spider_looger_str
from .httprequest import RemoteService


class Crawl:
    """
    CVE platform base class.

    Subclasses populate attributes such as ``cve``, ``pkg``, ``versions``
    and ``_folder`` and implement :meth:`parse` to consume a response.
    """

    def __init__(self, **kwargs) -> None:
        # Store arbitrary crawl attributes (cve, pkg, versions, _folder, ...).
        self.__dict__.update(**kwargs)

    def parse(self, response, **kwargs):
        """
        Response entry point; every concrete platform must override this.

        Args:
            response: HTTP response object
            **kwargs: extra context forwarded by the downloader

        Raises:
            NotImplementedError: always, on the base class
        """
        raise NotImplementedError(
            f"{self.__class__.__name__}.parse callback is not defined"
        )

    @staticmethod
    def _re_escape(name: str):
        """Escape regex metacharacters that may appear in a package name.

        Args:
            name (str): package name

        Returns:
            str: name safe to interpolate into a regular expression
        """
        # Bug fix: the original used "\+", "\^", ... which are invalid
        # escape sequences in plain string literals (SyntaxWarning on
        # modern Python); build the backslash explicitly instead.
        for char in ("+", "^", "$", ".", "|", "-"):
            name = name.replace(char, "\\" + char)
        return name

    @property
    def folder(self):
        """
        Output directory for this CVE: <out_path>/<pkg>-<cve> when both
        ``pkg`` and ``cve`` are set, otherwise the bare output path.
        """
        folder = getattr(self, "_folder", None)
        if folder is None:
            # Robustness: also covers an explicitly-None _folder, which the
            # original would have propagated into os.path.join.
            folder = DEFAULT_SAVE_PATH
        if hasattr(self, "pkg") and hasattr(self, "cve"):
            folder = os.path.join(folder, self.pkg + "-" + self.cve)
        return folder

    def _create_folder(self, version, folder=None):
        """
        Build the path for one version folder and ensure its parent exists.

        Args:
            version: version identifier appended to the folder
            folder: base folder; defaults to :attr:`folder`

        Returns:
            str: the version folder path (the folder itself is created
            later by the copy step)
        """
        if not folder:
            folder = self.folder

        path = os.path.join(folder, str(version))
        os.makedirs(os.path.dirname(path), exist_ok=True)
        return path

    def create_folders(self, versions):
        """
        Create a folder path for every version.

        Bug fix: the original returned inside the loop, so only the first
        version's folder was ever prepared and the rest were silently
        skipped.

        Args:
            versions: iterable of version identifiers (may be empty)

        Returns:
            list[str] of version folder paths, or :attr:`folder` (str)
            when *versions* is empty — callers handle both shapes.
        """
        if not versions:
            return self.folder

        return [self._create_folder(version) for version in versions]


class FileHandle:
    """
    Shared file-copy helper used by the request and save pipelines.
    """

    def copy(self, source_path, target_path):
        """
        Copy every file from *source_path* into *target_path*, renaming
        each to ``<prefix>_<n>.patch`` where ``n`` continues after the
        files already present in the target.

        Args:
            source_path: folder whose files are copied
            target_path: destination folder (created if missing)

        Returns:
            None
        """
        os.makedirs(target_path, exist_ok=True)
        # Bug fix: os.listdir() yields bare names, so os.path.isfile(name)
        # tested paths relative to the CWD and the count was always 0;
        # join with the target folder first.
        files_num = sum(
            os.path.isfile(os.path.join(target_path, name))
            for name in os.listdir(target_path)
        )
        for index, file in enumerate(os.listdir(source_path), start=1):
            mv_file_name = file.split(
                "_")[0] + "_{num}.patch".format(num=files_num + index)
            try:
                shutil.copyfile(
                    os.path.join(source_path, file),
                    os.path.join(target_path, mv_file_name),
                )
            except shutil.Error:
                # Raised e.g. when source and destination are the same file.
                logger.warning(
                    "The file '%s' of the same name exists in the folder : %s"
                    % (file, os.path.join(target_path, mv_file_name))
                )


class RequestsUrl(FileHandle):
    """
    Wraps one HTTP request: the target URL, the owning crawl and the
    callback that consumes the response (defaults to saving it to disk
    as a patch file).
    """

    def __init__(self, url, crawl, callback=None, **kwargs) -> None:
        """
        Args:
            url: URL to download
            crawl: the Crawl instance this request belongs to; supplies
                cve/pkg/versions and the destination folder
            callback: response handler; defaults to :meth:`_wget`
            **kwargs: extra flags (e.g. ``is_patch``) read by the engine

        Raises:
            RuntimeError: when *crawl* is not a Crawl instance
        """
        if not isinstance(crawl, Crawl):
            raise RuntimeError(
                "The crawl parameter is of the wrong type and should be a subclass of Crawl"
            )
        self.crawl = crawl
        self.url = url
        self.callback = callback or self._wget
        self.__dict__.update(**kwargs)

    def _wget(self, response, **kwargs):
        """
        Save the response body as a patch file and fan it out.

        The body is first written to a staging folder under the crawl's
        output directory (name: ``<cve>_<sha1-of-url>.patch``), then
        copied into every version folder on success; the staging folder
        is removed afterwards in all cases.

        Args:
            response: HTTP response whose ``text`` is the patch content
            **kwargs: unused; accepted for callback-signature compatibility

        Returns:
            None
        """
        folder = self.crawl.folder or DEFAULT_SAVE_PATH
        tmp_path = os.path.join(folder, "tmp-folder")
        try:
            os.makedirs(tmp_path, exist_ok=True)
            # SHA-1 of the URL keeps file names unique per source URL.
            with open(
                    os.path.join(
                        tmp_path, self.crawl.cve +
                        "_{r}.patch".format(
                            r=hashlib.sha1(self.url.encode()).hexdigest())
                    ),
                    "w",
                    encoding="utf-8",
            ) as file:
                file.write(response.text)
        except IOError as error:
            logger.error(error)
        else:
            # Only fan out to version folders when the write succeeded.
            folders = self.crawl.create_folders(self.crawl.versions)
            if isinstance(folders, str):
                folders = [folders]

            for target_folder in folders:
                self.copy(tmp_path, target_folder)
        finally:
            # Always clean up the staging folder, even on failure.
            if os.path.exists(tmp_path):
                shutil.rmtree(tmp_path)


class RequestRepeat:
    """
    Deduplicates requests so the same (cve, url) pair is processed once.
    """

    def __init__(self) -> None:
        # SHA-1 hex digests of every request seen so far.
        self._fingerprints = set()

    def _to_bytes(self, text, encoding="utf-8"):
        """Return *text* as bytes, encoding str input with *encoding*."""
        return text if isinstance(text, bytes) else text.encode(encoding)

    def _request_figerprint(self, request):
        """Build a SHA-1 hex digest from the request's CVE id and URL."""
        digest = hashlib.sha1()
        for part in (request.crawl.cve, request.url):
            digest.update(self._to_bytes(part))
        return digest.hexdigest()

    def request_seen(self, request):
        """
        Return True when this request was fingerprinted before;
        otherwise record it and return False.
        """
        fingerprint = self._request_figerprint(request=request)
        seen = fingerprint in self._fingerprints
        if not seen:
            self._fingerprints.add(fingerprint)
        return seen


class SavePipe(FileHandle):
    """
    Data storage pipeline
    """

    def __init__(self, crawl, **kwargs) -> None:
        """
        Initialize the class
        Args:
            crawl:
            **kwargs: crawl
        """
        if not isinstance(crawl, Crawl):
            raise RuntimeError(
                "The crawl parameter is of the wrong type and should be a subclass of Crawl"
            )
        self.crawl = crawl
        self.__dict__.update(**kwargs)

    def save_process(self, crawl):
        """
        Data storage pipeline
        Args:
            crawl: crawl

        Returns:

        """
        path = os.path.join(crawl.folder, "repair-verdict.txt")
        try:
            os.makedirs(crawl.folder, exist_ok=True)
            if not hasattr(self, "text") or not self.text:
                return
            with open(path, "a", encoding="utf-8") as file:
                file.write(getattr(self, "text", "") + "\n")
        except IOError as error:
            logger.error(error)


class Versions:
    """
    Version-string comparison helpers (ordering, similarity, extraction).
    """

    # Separators that may appear in upstream version strings.
    separator = (".", "-")
    # Internal normalised separator used before numeric comparison.
    _connector = "&"

    def _order(self, version, separator=None):
        """
        Split *version* on *separator* and return its numeric parts.

        Args:
            version: version string
            separator: split character; defaults to the internal connector

        Returns:
            tuple[int, ...] of the numeric components
        """
        if not separator:
            separator = self._connector
        return tuple(int(v) for v in version.split(separator) if v.isdigit())

    def _similar(self, text, compare_text):
        """
        Prefix similarity of two strings as a percentage of len(text).

        The weight is the length of the common prefix (index of the first
        differing character).
        """
        if not text:
            # Robustness: avoid ZeroDivisionError on an empty string.
            return 0.0
        shorter = text if len(text) < len(compare_text) else compare_text
        # Bug fix: identical strings used to score 0 because the loop never
        # assigned a weight; default to the full compared length instead.
        weight = len(shorter)
        for index in range(len(shorter)):
            if text[index] != compare_text[index]:
                weight = index
                break
        return weight / len(text) * 100.0

    def similarity(self, text, compare_queue):
        """
        Compare *text* against one string or a sequence of strings.

        Returns:
            float for a single string, list[float] for a sequence
        """
        if isinstance(compare_queue, str):
            return self._similar(text, compare_queue)
        return [self._similar(text, compare_text)
                for compare_text in compare_queue]

    def match_version(self, pkg_name, fix_text):
        """
        Extract version numbers for *pkg_name* from a comma-separated
        "fixed in" text.
        """
        versions = list()
        for pkg_info in fix_text.split(","):
            if pkg_name.lower() not in pkg_info.lower():
                continue
            # Bug fix: re.search may find nothing; the original called
            # .group() on None and raised AttributeError.
            matched = re.search(VERSION_REGEX, pkg_info)
            if matched:
                versions.append(matched.group())
        return versions

    def lgt(self, version, compare_version):
        """
        Return True when *version* is numerically >= *compare_version*.

        Both separators ('.', '-') are normalised to the internal
        connector before the numeric tuples are compared.
        """
        for separator in self.separator:
            version = self._connector.join(version.split(separator))
            compare_version = self._connector.join(
                compare_version.split(separator))
        return self._order(version) >= self._order(compare_version)


class Nvd(Crawl):
    """
    Crawl the NVD detail page of a CVE and extract patch/commit URLs.
    """

    # Bug fix: the original wrote [github.com|gitlab.com|...] which is a
    # character class (matches ONE character from that set), not host
    # alternation; use a non-capturing group of escaped hosts instead.
    patch_regex = (
        r"(?P<patch_url>https://(?:github\.com|gitlab\.com|git\.{pkg}\.org).*{pkg}.*/commit/\w+)|"
        r"(?P<other_url>https://(?:github\.com|gitlab\.com).*{pkg}.*pull/\d+|"
        r"https://(?:github\.com|gitlab\.com).*{pkg}.*commits/[A-Za-z0-9\.\+\/_-]+)"
    )

    def __init__(self, cve, pkg, versions, out_path=DEFAULT_SAVE_PATH):
        """
        Args:
            cve: CVE number
            pkg: package name
            versions: affected versions
            out_path: output directory
        """
        self.cve = cve
        self.pkg = pkg
        self.versions = versions
        self.fix_versions = None
        self._folder = out_path
        self.start_url = NVD_HOST + "/vuln/detail/" + cve

    def get_urls(self, resp_str, pkg):
        """
        Scan the page body for patch and 'other' URLs mentioning *pkg*.

        Args:
            resp_str: page body text
            pkg: package name

        Returns:
            (patch_urls, other_urls): two sets of URLs
        """
        patch_urls = set()
        other_urls = set()
        escaped = Crawl._re_escape(pkg)
        for res in re.finditer(self.patch_regex.format(pkg=escaped), resp_str):
            patch_url = res["patch_url"] or ""
            other_url = res["other_url"]
            # Bug fix: the original tested the literal text "git.{pkg}.org"
            # (placeholder never substituted), so the cgit rewrite branch
            # could never run.
            if "git.{}.org".format(pkg) in patch_url:
                patch_urls.add(patch_url.replace("commit", "patch"))
            elif patch_url:
                patch_urls.add(patch_url + ".diff")

            if other_url:
                other_urls.add(other_url)

        logger_str = get_spider_looger_str(
            "Nvd", self.pkg, self.versions, self.cve, patch_urls, other_urls
        )
        logger.info(logger_str)

        return patch_urls, other_urls

    def parse(self, response, **kwargs):
        """
        Turn the NVD detail page into SavePipe records and patch downloads.

        Args:
            response: HTTP response of the detail page
            **kwargs: unused

        Yields:
            SavePipe and RequestsUrl items for the engine
        """
        patch_urls, other_urls = self.get_urls(response.text, self.pkg)
        if not patch_urls:
            if not other_urls:
                text = "NVD result:There is no patch information and it has not been repaired"
                yield SavePipe(crawl=self, text=text)
            else:
                for other_url in other_urls:
                    # Bug fix: the full-width colon was missing, which made
                    # the later report parser (parse_content, split on "：")
                    # raise IndexError on this line.
                    text = "Uncertain patch information in Nvd：%s" % other_url
                    yield SavePipe(crawl=self, text=text)
        else:
            for url in patch_urls:
                text = "find patch information in Nvd：%s" % url
                yield SavePipe(crawl=self, text=text)
                yield RequestsUrl(url=url, crawl=self, is_patch=True)


class Debian(Crawl):
    """
    Get patch/commit URLs from the Debian security tracker.
    """

    def __init__(self, cve, versions, pkg, out_path=DEFAULT_SAVE_PATH):
        """
        Args:
            cve: CVE number
            versions: affected versions
            pkg: package name
            out_path: output directory
        """
        self.cve = cve
        self.versions = versions
        self.pkg = pkg
        self._folder = out_path
        self.start_url = "https://security-tracker.debian.org/tracker/" + self.cve

    def parse(self, response, **kwargs):
        """
        Parse the tracker page and yield SavePipe / RequestsUrl items.

        Args:
            response: HTTP response of the tracker page
            **kwargs: unused

        Yields:
            SavePipe and RequestsUrl items for the engine
        """
        patch_urls = []
        other_urls = []
        soup = BeautifulSoup(response.text, "html.parser")
        pre = soup.pre
        if pre:
            # Bug fix: (?:host|host|...) alternation instead of the
            # original character class, which matched a single character.
            commit_regex = r"https://(?:github\.com|gitlab\.com|git\.{pkg}\.org).*{pkg}.*/commit/\w+".format(
                pkg=Crawl._re_escape(self.pkg)
            )
            for link in pre.find_all("a"):
                _url = link.get("href")
                if re.search(commit_regex, _url):
                    if "git" in _url and "commit" in _url:
                        # cgit servers serve the raw patch at /patch/.
                        if "git.{}.org".format(self.pkg) in _url:
                            _url = _url.replace("commit", "patch")
                        else:
                            _url = _url + ".diff"
                        patch_urls.append(_url)
                else:
                    other_urls.append(_url)
        logger_str = get_spider_looger_str(
            "Debian", self.pkg, self.versions, self.cve, patch_urls, other_urls
        )
        logger.info(logger_str)
        for url in other_urls:
            text = "Uncertain patch information in Debian：%s" % url
            yield SavePipe(crawl=self, text=text)

        for patch_url in patch_urls:
            yield RequestsUrl(url=patch_url, crawl=self, is_patch=True)
            text = "find patch information in Debian：%s" % patch_url
            yield SavePipe(crawl=self, text=text)
        # Bug fix: `not all(...)` reported "no patch information" whenever
        # EITHER list was empty — even when patches had just been found;
        # only report it when neither kind of URL was found.
        if not any([other_urls, patch_urls]):
            text = "Debian result:There is no patch information and it has not been repaired"
            yield SavePipe(crawl=self, text=text)


class Ubuntu(Crawl):
    """
    Get patch/commit URLs from the Ubuntu security site.
    """

    def __init__(self, cve, versions, pkg, out_path=DEFAULT_SAVE_PATH):
        """
        Args:
            cve: CVE number
            versions: affected versions
            pkg: package name
            out_path: output directory
        """
        self.cve = cve
        self.versions = versions
        self.pkg = pkg
        self._folder = out_path
        self.start_url = "https://ubuntu.com/security/" + self.cve

    def parse(self, response, **kwargs):
        """
        Parse the security page and yield SavePipe / RequestsUrl items.

        Args:
            response: HTTP response of the security page
            **kwargs: unused

        Yields:
            SavePipe and RequestsUrl items for the engine
        """
        git_list, other_list = [], []
        try:
            soup = BeautifulSoup(response.text, "lxml")
            content = soup.find(colspan="2")
            # Bug fix: (?:host|host|...) alternation instead of the
            # original character class, which matched a single character.
            commit_regex = r"https://(?:github\.com|gitlab\.com|git\.{pkg}\.org).*{pkg}.*/commit/\w+".format(
                pkg=Crawl._re_escape(self.pkg)
            )
            for url in content.find_all("a"):
                patch_url = url.get("href")
                if re.search(commit_regex, patch_url):
                    # Bug fix: '"git" and "commit" in x' only evaluated the
                    # second membership test; check both substrings.
                    if "git" in patch_url and "commit" in patch_url:
                        # Bug fix: the original tested/replaced on the bs4
                        # Tag object instead of the href string, which
                        # raised AttributeError inside the bare except.
                        if "git.{}.org".format(self.pkg) in patch_url:
                            patch_url = patch_url.replace("commit", "patch")
                        else:
                            patch_url = patch_url + ".diff"
                        git_list.append(patch_url)
                else:
                    other_list.append(patch_url)

            logger_str = get_spider_looger_str(
                "Ubuntu", self.pkg, self.versions, self.cve, git_list, other_list
            )
            logger.info(logger_str)

            for patch in git_list:
                yield RequestsUrl(url=patch, crawl=self, is_patch=True)
                text = "find patch information in Ubuntu：%s" % patch
                yield SavePipe(crawl=self, text=text)
            for url in other_list:
                text = "Uncertain patch information in Ubuntu：%s" % url
                yield SavePipe(crawl=self, text=text)
        except Exception:
            # Page layout changed or the expected element is missing;
            # report "not repaired" (narrowed from the original bare except).
            logger.info(
                "Ubuntu result:There is no patch information and it has not been repaired"
            )
            yield SavePipe(
                crawl=self,
                text="Ubuntu result:There is no patch information and it has not been repaired",
            )


class Bugzilla(Crawl):
    """
    Query Bugzilla for a CVE's bug report, fixed-in versions and
    patch URLs mentioned in the comments.
    """

    bugzilla_api = Api(base_url=BUGZILLA_HOST)

    def __init__(self, cve, pkg, versions, out_path=DEFAULT_SAVE_PATH):
        """
        Args:
            cve: CVE number
            pkg: package name
            versions: affected versions
            out_path: output directory
        """
        self._v = Versions()
        self.cve = cve
        self.pkg = pkg
        self.versions = versions
        self.fix_versions = None
        self._folder = out_path
        self.start_url = self.bugzilla_api.get_bug(fields=dict(alias=self.cve))

    @staticmethod
    def load_json(content):
        """
        Parse *content* as JSON; return {} for empty or invalid input.

        Args:
            content: the JSON content returned by the HTTP request
        """
        if not content:
            return dict()
        try:
            return json.loads(content)
        except json.JSONDecodeError:
            return dict()

    def _extract_fixed_in_version(self, bugs_info):
        """
        Collect versions mentioned in each bug's 'cf_fixed_in' field.

        Args:
            bugs_info: decoded bug-query response
        """
        fixed_in_versions = []
        for bug in bugs_info.get("bugs", []):
            fixed_in_versions.extend(
                self._v.match_version(self.pkg, bug.get("cf_fixed_in", ""))
            )

        return fixed_in_versions

    def _get_comments(self):
        """
        Fetch and decode the CVE's comments; {} on a non-200 response.
        """
        status_code, response = self.bugzilla_api.get_comments(fields=self.cve)
        comments = dict()
        if status_code == requests.codes["ok"]:
            comments = Bugzilla.load_json(response)
        return comments

    def _filter_patch_remote(self, remote_url):
        """
        Keep only commit URLs on github.com / git.<pkg>.org and rewrite
        them to a downloadable patch form.

        Args:
            remote_url: a URL or list of URLs
        """
        if isinstance(remote_url, str):
            remote_url = [remote_url]
        patch_urls = list()
        _regex = r"^https://(github.com|git.{pkg_name}.org).*{pkg_name}(.git)?/commit.*".format(
            pkg_name=Crawl._re_escape(self.pkg)
        )
        for url in remote_url:
            if not re.findall(_regex, url, flags=re.IGNORECASE):
                continue

            # cgit servers serve the raw patch at /patch/.
            if "git.{}.org".format(self.pkg) in url:
                url = url.replace("commit", "patch")
            else:
                url = url + ".diff"

            patch_urls.append(url)

        return patch_urls

    def contrast_version(self, fix_versions, warehouse_versions):
        """
        Return the warehouse versions that are >= their most similar
        fixed version.
        """
        _fix_version = list()
        for _version in warehouse_versions:
            version_similarity = self._v.similarity(_version, fix_versions)
            compared_version = fix_versions[
                version_similarity.index(max(version_similarity))
            ]
            if self._v.lgt(_version, compared_version):
                _fix_version.append(_version)

        return _fix_version

    def _extract_url(self, comments):
        """
        Pull every URL out of every comment's text.

        Args:
            comments: decoded comments response
        """
        pulls = []
        # Defensive .get: the service may answer without a "bugs" section.
        for _, comment in comments.get("bugs", {}).items():
            for comment_info in comment["comments"]:
                pulls.extend(re.findall(URL_REGEX, comment_info["text"]))
        return pulls

    def parse_comments(self, response, **kwargs):
        """
        Parse the bug comments; yield SavePipe records and patch downloads.
        """
        comments = Bugzilla.load_json(response.text)
        if not comments:
            logger.info("Bugzilla result:{} has no comment".format(self.cve))
            yield SavePipe(crawl=self, text="Bugzilla result:There is no patch information and it has not been repaired")
            # Bug fix: the original fell through and indexed the empty
            # dict below, raising KeyError; stop once we know there are
            # no comments.
            return

        patchs = self._filter_patch_remote(
            remote_url=self._extract_url(comments))

        if not patchs:
            logger.info(
                "Bugzilla result:{} is not fixed yet".format(self.cve))
            yield SavePipe(
                crawl=self,
                text="Bugzilla result:no patch information in Bugzilla and it has not been fixed yet",
            )
        else:
            logger.info(
                f"Bugzilla result: found patch urls :{pprint.pformat(patchs)}")

        for patch in patchs:
            text = "find patch information in Bugzilla：%s" % patch
            yield SavePipe(crawl=self, text=text)
            yield RequestsUrl(url=patch, crawl=self, is_patch=True)

    def parse(self, response, **kwargs):
        """
        Record fixed-in versions, then request the comments for parsing.
        """
        self.fix_versions = self._extract_fixed_in_version(
            bugs_info=Bugzilla.load_json(response.text)
        )
        # NOTE(review): _get_comments unpacks get_comments() as
        # (status, body) while here its return value is used as a URL —
        # confirm Api.get_comments semantics in .bugzilla.
        yield RequestsUrl(
            url=self.bugzilla_api.get_comments(fields=self.cve),
            crawl=self,
            callback=self.parse_comments,
        )


class Cardiac:
    """
    The engine center: schedules requests, runs downloads on a thread
    pool and routes results into the save pipeline.
    """

    download_queue = Queue()
    save_queue = Queue()
    engine_queue = Queue()
    retry_count = 3
    pool = ThreadPoolExecutor(max_workers=16)
    # Bounded record of in-flight download futures for the heartbeat.
    download_thread = deque(maxlen=100)
    server_host = 'http://124.160.11.57:8100/git_command'

    def __init__(self) -> None:
        self._active = True
        self.cve_infos = list()
        self._request_fingerprint = RequestRepeat()

    def _consume(self, cve, obj):
        """
        Build the start request of platform *obj* for one CVE record.

        Raises:
            RuntimeError: when *obj* is not a Crawl subclass
        """
        if not issubclass(obj, Crawl):
            # Bug fix: the original raised RuntimeError("") with no message.
            raise RuntimeError("obj must be a subclass of Crawl, got %r" % obj)

        crawl = obj(**cve)
        return RequestsUrl(url=crawl.start_url, crawl=crawl, callback=crawl.parse)

    def _record_base_info(self, cve):
        """Create a SavePipe recording the raw CVE input line."""
        text = "CVE Information : {cve}".format(cve=str(cve))
        return SavePipe(text=text, crawl=Crawl(**cve))

    def _req_flow(self):
        """
        Yield the start requests of every platform for each CVE.
        """
        for cve in self.cve_infos:
            yield self._consume(cve, obj=Bugzilla)
            yield self._consume(cve, obj=Debian)
            yield self._consume(cve, obj=Ubuntu)
            yield self._consume(cve, obj=Nvd)
            yield self._record_base_info(cve)

    def _internal_server(self, request, pkg, commitid):
        """
        Ask the internal git mirror for the commit content.

        Args:
            request: RemoteService used to issue the POST
            pkg: package name (repo is sharded by its first letter)
            commitid: commit hash to fetch

        Returns:
            (success, response): success is True on HTTP 200
        """
        first_char = pkg.lower()[0]
        success = False
        response = request.request(url=self.server_host, method="post",
                                   json={
                                       "git_repo": f"upstream/{first_char}/{pkg}/{pkg}.git",
                                       "git_command": ["git-show", commitid, "--format=%e"]
                                   }, max_retry=1)
        if response.status_code == requests.codes["ok"]:
            success = True
        return success, response

    def _downloader(self, req_flow):
        """
        Download one request: prefer the internal git mirror for patch
        URLs, fall back to the public URL, then feed the callback's
        results to the engine queue.
        """
        request = RemoteService()

        def _fetch(response=None, success=False):
            # Fall back to the public URL when the mirror had no answer.
            if not success:
                response = request.request(
                    url=req_flow.url, method="get", timeout=15)

                if response.status_code != requests.codes["ok"]:
                    self._download_failed(request=req_flow)
                    return

            callback = req_flow.callback(response)
            if isinstance(callback, Iterable):
                self._iterback(callback)

        if getattr(req_flow, "is_patch", False):
            success, response = self._internal_server(
                request, req_flow.crawl.pkg,
                req_flow.url.split("/")[-1].replace(".diff", ""))
            _fetch(response=response, success=success)
        else:
            _fetch()

    def _download_failed(self, request):
        """Log a failed download and record it in the report."""
        text = "File download failed:%s" % request.url
        logger.error(text)
        self.engine_queue.put(SavePipe(text=text, crawl=request.crawl))

    def _iterback(self, callback):
        """Drain a generator callback into the engine queue."""
        while self._active:
            try:
                self.engine_queue.put(callback.send(None))
            except StopIteration:
                break

    def download(self):
        """
        Download loop: pull requests from the queue, skip duplicates,
        and hand each off to the thread pool. A None item terminates.
        """
        while self._active:
            request_flow = self.download_queue.get()
            if not request_flow:
                break
            if self._request_fingerprint.request_seen(request_flow):
                continue
            future = self.pool.submit(self._downloader, request_flow)
            self.download_thread.append(future)

    def save_pipe(self):
        """
        Save loop: pull SavePipe items from the queue and run their
        save_process. A None item terminates.
        """
        while self._active:
            save_handle = self.save_queue.get()
            if not save_handle:
                break

            callback = save_handle.save_process(save_handle.crawl)
            if isinstance(callback, Iterable):
                self._iterback(callback)

    def stop(self):
        """
        Stop the service: deactivate and unblock every worker loop
        with a sentinel None.
        """
        self._active = False
        self.download_queue.put_nowait(None)
        self.engine_queue.put_nowait(None)
        self.save_queue.put_nowait(None)
        return not self._active

    def _read_csvfile(self, file, out_path):
        """
        Load CVE records from a CSV of (cve, pkg, "[v1,v2]") rows.

        Args:
            file: CSV file path
            out_path: output directory recorded on each record
        """
        if not os.path.isfile(file):
            logger.info("file not found:{}".format(file))
            return
        try:
            # Bug fix: the original opened the file inline and never
            # closed the handle; use a context manager.
            with open(file, encoding="utf-8") as csv_file:
                for cveinfo in csv.reader(csv_file):
                    cve, pkg, versions = cveinfo
                    version = versions.replace("[", "").replace("]", "").split(",")
                    self._set_cve(cve=cve, pkg=pkg,
                                  versions=version, out_path=out_path)
        except csv.Error as error:
            logger.error(error)

    def _set_cve(self, cve, pkg, versions, out_path):
        """Normalise one CVE record and append it to the work list."""
        if isinstance(versions, str):
            versions = [versions]
        self.cve_infos.append(
            {"cve": cve, "pkg": pkg, "versions": versions, "out_path": out_path})

    def start(self, args):
        """
        Start the service: load CVE records from the CLI args and feed
        the request flow into the engine queue.
        """
        # Get information such as CVE
        if args.f:
            self._read_csvfile(file=args.f, out_path=args.o)
        if args.cve:
            self._set_cve(cve=args.cve, pkg=args.name,
                          versions=args.v, out_path=args.o)

        if not self.cve_infos:
            logger.info("There is no CVE information queried this time")
            self.stop()

        req_flow = self._req_flow()
        while self._active:
            try:
                self.engine_queue.put(next(req_flow))
            except StopIteration:
                break

    def scheduler(self):
        """
        Task scheduler: route engine-queue items to the download or
        save queue by type. A None item terminates.
        """
        while self._active:
            task = self.engine_queue.get()
            if not task:
                break
            if isinstance(task, RequestsUrl):
                self.download_queue.put(task)

            if isinstance(task, SavePipe):
                self.save_queue.put(task)


class CrawlerProcess:
    """
    The process of data crawling: wires the engine worker threads, the
    heartbeat, and the final rewrite of the per-CVE report files.
    """

    def __init__(self) -> None:
        self.engine = Cardiac()
        self._thread_container = [self._downloader,
                                  self._scheduler, self._save_pipe]
        # Set by the SIGINT handler to request shutdown.
        self.end_signal = False

    @property
    def finish(self):
        """True when every engine queue has been drained."""
        return all(
            [
                self.engine.download_queue.empty(),
                self.engine.save_queue.empty(),
                self.engine.engine_queue.empty(),
            ]
        )

    def _heartbeat(self, interval=6):
        """
        Poll every *interval* seconds; wait for in-flight downloads and
        stop the engine on completion or when SIGINT set ``end_signal``.
        """
        while True:
            time.sleep(interval)
            # Wait until all recorded download futures have finished.
            while len(self.engine.download_thread) > 0:
                thread = self.engine.download_thread.popleft()
                if not thread.done():
                    self.engine.download_thread.append(thread)
                    # Bug fix: the original re-queued unfinished futures in
                    # a tight loop, busy-spinning a CPU core; pause briefly.
                    time.sleep(0.1)
            if self.end_signal:
                self.engine.download_queue.queue.clear()
                self.engine.save_queue.queue.clear()
                self.engine.engine_queue.queue.clear()
                self.engine.stop()
                break
            if self.finish:
                self.engine.stop()
                break

    def _stop(self, signum, frame):
        """SIGINT handler: drop pending downloads and flag shutdown."""
        self.engine.download_thread.clear()
        self.end_signal = True

    @property
    def _downloader(self):
        """Thread running the engine's download loop."""
        return Thread(target=self.engine.download)

    @property
    def _scheduler(self):
        """Thread running the engine's scheduler loop."""
        return Thread(target=self.engine.scheduler)

    @property
    def _save_pipe(self):
        """Thread running the engine's save loop."""
        return Thread(target=self.engine.save_pipe)

    def parse_content(self, root, d):
        """
        Parse <root>/<d>/repair-verdict.txt.bak into a summary dict with
        the package info and the categorised URL lists.
        """
        suspected_spatch = []
        not_found_patch = []
        download_failed = []
        finds = []
        find_path = {}
        # Bug fix: cve_dict was unbound (NameError) when the file had no
        # "CVE Information" line; default to an empty dict.
        cve_dict = {}
        with open(os.path.join(root, d, "repair-verdict.txt.bak"), "r", encoding="utf-8") as file:
            for line in file:
                if "cve information" in line.lower():
                    cve_info = line.replace("CVE Information : ", "")
                    cve_dict = ast.literal_eval(cve_info)
                if "Uncertain" in line:
                    suspected_spatch.append(
                        line.split("：")[1].split("\n")[0])
                if "no patch information" in line:
                    not_found_patch.append(line.split(" result:")[0])
                if "File download failed" in line:
                    download_failed.append(
                        line.split("failed:")[1].split("\n")[0])
                if "find patch information" in line:
                    find = line.split("：")[1].split("\n")[0]
                    if find not in finds:
                        finds.append(find)
                    find_path[line.split("in ")[1].split("：")[0]] = finds
        return {
            "pkg_name": cve_dict.get("pkg"),
            "cve": cve_dict.get("cve"),
            "versions": cve_dict.get("versions"),
            "Not found patch": ["Platform", list(set(not_found_patch))],
            "Seen patch files": ["Platform Patchs", find_path],
            "Download failed": ["Download Patchs URl", list(set(download_failed))],
            "Suspected Patchs URL": ["Suspected Patchs URL", list(set(suspected_spatch))]
        }

    def _write_text_cont(self, heard, value, file):
        """
        Write one report section.

        Args:
            heard: section heading
            value: [label, payload] where payload is a list or a
                platform->urls dict
            file: open text file to write into
        """
        file.write("=" * 20 + heard + "=" * 20 + "\n")
        if not value[1]:
            file.write("There is no information"+"\n\n")
            return
        if isinstance(value[1], dict):
            for plat, patch in value[1].items():
                seen_data = value[0].split(" ")
                for pat in patch:
                    file.write(seen_data[0]+" "+plat+" " +
                               seen_data[1]+": " + pat+"\n\n")
        else:
            for va in value[1]:
                file.write(value[0]+": " + va+"\n\n")

    def wirte_content(self, root, d, file):
        """
        Write the full human-readable report for one CVE folder.

        Args:
            root: walk root directory
            d: CVE folder name
            file: open text file to write into
        """
        _content = self.parse_content(root, d)
        pkg_name = _content.get("pkg_name")
        cve = _content.get("cve")
        versions = _content.get("versions")
        title = (
            f"package name: {pkg_name}\n\nCVE number: {cve}\n\nversions: {pprint.pformat(versions)}\n\n")
        file.write(title)
        for heard, value in _content.items():
            if heard not in ["pkg_name", "cve", "versions"]:
                self._write_text_cont(heard, value, file)

    def rewrite_text(self, args):
        """
        Rewrite every raw repair-verdict.txt under args.o into the
        formatted report (original kept as .bak).
        """
        for root, dirs, files in os.walk(args.o):
            for d in dirs:
                try:
                    os.rename(os.path.join(root, d, "repair-verdict.txt"),
                              os.path.join(root, d, "repair-verdict.txt.bak"))
                    with open(os.path.join(root, d, "repair-verdict.txt"), "w", encoding="utf-8") as file:
                        self.wirte_content(root, d, file)
                except Exception as error:
                    logger.error(error)

    def run(self, args):
        """
        Entrance to program execution: install the SIGINT handler, start
        the worker threads, feed the engine, wait, then rewrite reports.
        """
        signal.signal(signal.SIGINT, self._stop)
        # Start Multiple Tasks
        while self._thread_container:
            task = self._thread_container.pop()
            task.start()

        self.engine.start(args)
        self._heartbeat()
        self.rewrite_text(args)


# Module-level crawler instance used as the package entry point.
crawler = CrawlerProcess()

if __name__ == "__main__":
    # Bug fix: "cralwer" was a typo and constructed a redundant second
    # CrawlerProcess; reuse the module-level instance.
    # NOTE(review): run() reads args.f / args.cve, so an empty string will
    # fail at attribute access — confirm the intended CLI args object.
    crawler.run(args="")
