import os
import sys
import bz2
import time
import glob
import shutil
import pickle
import fnmatch
import sqlite3
import requests
import argparse
from contextlib import closing
from xml.dom.minidom import parse
import xml.dom.minidom
import re
import requests


def get_dir_list(url_link):
    """Fetch an HTTP directory-index page and return the listed entry names.

    The first href match (the parent-directory link) is skipped.
    Returns [] when the page cannot be fetched (non-200 status).
    """
    response = requests.get(url_link)
    if response.status_code != 200:
        return []
    entries = re.findall('href=\"[^?].*\">(.*)</a>', response.text)
    return entries[1:]


def get_primary_sqlite_bz2_link(url_link):
    """Return the full URL of the *-primary.sqlite.bz2 entry listed at url_link.

    Returns "" when the index page cannot be fetched (non-200 status).
    NOTE(review): raises IndexError when the page has no matching href —
    presumably callers only pass repodata directories where one exists.
    """
    response = requests.get(url_link)
    if response.status_code != 200:
        return ""
    hrefs = re.findall('href=\"([^?].*)\">.*-primary\.sqlite\.bz2</a>', response.text)
    return url_link + '/' + hrefs[0]


def get_link_repo_map(base_url, date_str):
    """Crawl base_url/date_str and build a {"<version>#<arch>": repo_url} map.

    Version and arch names come from two levels of directory-index pages;
    any '/' in the listed names is dropped from the tag.
    """
    link_map = {}
    for version in get_dir_list(f"{base_url}{date_str}"):
        for arch in get_dir_list(f"{base_url}{date_str}/{version}"):
            tag = '#'.join([version, arch]).replace('/', '')
            link_map[tag] = f"{base_url}{date_str}/{version}{arch}".rstrip('/')
    return link_map


def dir_init(file_path_all):
    """Ensure the parent directory of file_path_all exists, creating it if needed.

    Args:
        file_path_all: a file path; only its dirname is created.
    """
    dir_path = os.path.dirname(file_path_all)
    if not os.path.exists(dir_path):
        print("初始化文件夹：", dir_path)
        # exist_ok guards against the race where another process creates the
        # directory between the exists() check above and this makedirs() call.
        os.makedirs(dir_path, exist_ok=True)


def download(file_url, save_path_dir):
    """Stream file_url into save_path_dir and return the local file path.

    Returns "" on any failure — callers test for the empty string rather
    than handling exceptions.

    Fixes vs. original: an HTTP error response (e.g. a 404 page) is no
    longer silently saved as the file (raise_for_status), and a missing
    content-length header no longer aborts the download (the original read
    the header only to compute an unused progress percentage).
    """
    file_name = os.path.basename(file_url)
    file_path = os.path.join(save_path_dir, file_name)
    dir_init(file_path)
    try:
        with open(file_path, 'wb') as f:
            with closing(requests.get(file_url, stream=True)) as response:
                response.raise_for_status()
                for chunk in response.iter_content(chunk_size=1024):
                    f.write(chunk)
        return file_path
    except Exception:
        # Best-effort by design: any network/IO error maps to "".
        return ""


def extract_file_from_bz2(file_path):
    """Decompress a .bz2 archive next to itself and return the new path.

    Bug fix: the original used str.rstrip(".bz2"), which strips any trailing
    characters from the SET {'.', 'b', 'z', '2'} rather than the literal
    suffix (e.g. "zb2.bz2" would become ""). The literal ".bz2" suffix is
    now removed instead, and the bz2 handle is closed via a context manager.
    """
    suffix = ".bz2"
    if file_path.endswith(suffix):
        file_path_bz2 = file_path[:-len(suffix)]
    else:
        file_path_bz2 = file_path
    with bz2.open(file_path, 'rb') as src, open(file_path_bz2, 'wb') as dst:
        shutil.copyfileobj(src, dst)
    return file_path_bz2


def get_db(sql_path):
    """Open and return a sqlite3 connection to the database file at sql_path."""
    connection = sqlite3.connect(sql_path)
    return connection


def deal_select(sql, sql_path, db_conn=None):
    """Execute a SELECT statement and return all result rows.

    Args:
        sql: SQL text to execute.
        sql_path: path to the sqlite database; used only when db_conn is None.
        db_conn: optional existing connection. When omitted, a temporary
            connection is opened here and closed before returning (the
            original leaked both the cursor and that connection).

    Returns:
        List of row tuples from cursor.fetchall().
    """
    own_conn = db_conn is None
    if own_conn:
        db_conn = get_db(sql_path)
    try:
        cursor = db_conn.cursor()
        try:
            cursor.execute(sql)
            return cursor.fetchall()
        finally:
            cursor.close()
    finally:
        # Only close connections we opened ourselves; callers keep theirs.
        if own_conn:
            db_conn.close()


def get_href_from_xml_file(file_path_xml):
    """Parse a repomd.xml file and return the basename of the primary_db href.

    Returns None when no <data type="primary_db"> element is present.
    """
    document = xml.dom.minidom.parse(file_path_xml)
    root = document.documentElement
    for data_node in root.getElementsByTagName("data"):
        if data_node.getAttribute("type") != "primary_db":
            continue
        location = data_node.getElementsByTagName("location")[0]
        return os.path.basename(location.getAttribute("href"))
    return None


def get_repo_info_sqlite(link_repo, save_path):
    """Fetch a repo's repomd.xml, download the primary sqlite archive it
    references, decompress it, and return a dict of the links and local paths.

    Example link_repo:
    "https://update.cs2c.com.cn/NS/V10/V10SP1/os/adv/lic/base/x86_64/"
    """
    base = link_repo.rstrip('/')
    link_repodata_xml = f"{base}/repodata/repomd.xml"
    file_path_xml = download(link_repodata_xml, save_path)
    primary_name = get_href_from_xml_file(file_path_xml)
    link_repodata_sqlite_bz2 = f"{base}/repodata/{primary_name}"
    file_path_sqlite_bz2 = download(link_repodata_sqlite_bz2, save_path)
    file_path_sqlite = extract_file_from_bz2(file_path_sqlite_bz2)
    return {
        "link_repo": base,
        "link_repodata_xml": link_repodata_xml,
        "link_repodata_sqlite_bz2": link_repodata_sqlite_bz2,
        "file_path_xml": file_path_xml,
        "file_path_sqlite_bz2": file_path_sqlite_bz2,
        "file_path_sqlite": file_path_sqlite,
    }


def get_repo_data(base_url, date_str, save_path):
    """Download and unpack the primary sqlite metadata for every repo found
    under base_url/date_str; return {"<version>#<arch>": repo_info_dict}.
    """
    return {
        tag: get_repo_info_sqlite(link_repo, save_path)
        for tag, link_repo in get_link_repo_map(base_url, date_str).items()
    }


def repo_info_map_save(repo_info_map, save_path):
    """Serialize repo_info_map to save_path with pickle."""
    with open(save_path, 'wb') as fh:
        pickle.dump(repo_info_map, fh)


def repo_info_map_load(save_path):
    """Deserialize and return the repo info map pickled at save_path."""
    with open(save_path, 'rb') as fh:
        return pickle.load(fh)


def rpm_filter(rpm_name, _rpm_name):
    """Check whether source-rpm name _rpm_name matches the query rpm_name.

    _rpm_name has the form "<name>-<version>-<release>"; the trailing
    version and release fields are discarded before comparing. rpm_name
    may contain shell-style wildcards ('*'), matched via fnmatch.
    """
    *name_parts, _version, _release = _rpm_name.split('-')
    name_only = '-'.join(name_parts)
    if '*' in rpm_name:
        return fnmatch.fnmatch(name_only, rpm_name)
    return rpm_name == name_only


def search_rpm_from_sqlite(rpm_name, file_path_sqlite):
    """Return the distinct rpm_sourcerpm values in file_path_sqlite whose
    package name matches rpm_name (wildcards allowed, see rpm_filter).

    Bug fix: the original stripped the ".src.rpm" suffix with str.rstrip,
    which removes trailing characters from the SET {'.','s','r','c','p','m'}
    and can mangle names whose release ends in those characters. The literal
    suffix is now removed instead. Returned values keep the full
    "...-<ver>-<rel>.src.rpm" form, as before.
    """
    suffix = ".src.rpm"

    def _strip_suffix(name):
        # Remove the literal ".src.rpm" suffix only.
        return name[:-len(suffix)] if name.endswith(suffix) else name

    sql_str = "select distinct rpm_sourcerpm from packages"
    group_all = [row[0] for row in deal_select(sql_str, file_path_sqlite)]
    return [name for name in group_all if rpm_filter(rpm_name, _strip_suffix(name))]


def search_rpm_version(rpm_name, repo_info_map):
    """Search every repo's primary sqlite db for rpm_name.

    Returns {"<product>#<arch>": [matching source rpm names]}.
    """
    return {
        tag: search_rpm_from_sqlite(rpm_name, info.get("file_path_sqlite"))
        for tag, info in repo_info_map.items()
    }


def get_email_head():
    """Return the fixed greeting paragraph at the top of the notification email."""
    return """
  您好，现有CVE漏洞修复相关的测试项申请，相关内容请您查收。
"""


def get_email_body(str_product="无", str_pkg_name="无"):
    """Return the email body section with the product names and package list.

    Both arguments default to "无" ("none").
    """
    return (
        "\n\n"
        f"【产品名称】：{str_product}\n\n"
        "【包名】：\n"
        f"{str_pkg_name}\n\n"
    )


def get_email_tail(str_date=None, str_arch="x86,aarch64,mips,loongarch"):
    """Return the trailing email section (CVE attachment refs, source URL,
    architectures, package status, test notes).

    Args:
        str_date: YYYYMMDD date used in attachment names and the source URL;
            defaults to today when falsy.
        str_arch: comma-separated architecture list to display.
    """
    str_date = str_date or time.strftime("%Y%m%d")
    return f"""
【涉及CVE】：（详情可参考附件（iso下载链接和修复内容及优先级-{str_date}.xlsx））

【单包源地址】：

    http://172.30.12.115/pkg/{str_date}/

【涉及架构】：{str_arch}

【软件包状态】：研发人员已测试软件包安装没问题。

【测试项需求】：参考附件（iso下载链接和修复内容及优先级-{str_date}.xlsx）

【测试系统  】：参考附件（iso下载链接和修复内容及优先级-{str_date}.xlsx）

【测试项说明】：

 此CVE暂无攻击脚本，目前暂无切实可行的方法来触发该漏洞。请测试人员按照测试项需求来进行测试。 

【依赖包地址】：无

----

"""


def get_email(single_package_info, str_date=None):
    """Assemble the full notification email from a {"product#arch": [rpm, ...]} map.

    Only architectures in the known candidate list are included. If anything
    goes wrong while assembling the email, a failure message containing the
    exception text is returned instead of raising.
    """
    try:
        arch_candidate = ["aarch64", "x86_64", "loongarch64", "mips64el"]
        keys = list(single_package_info.keys())
        all_arches = [key.split('#')[1] for key in keys]
        str_arch = ", ".join(sorted({a for a in all_arches if a in arch_candidate}))
        str_product = ", ".join(sorted({key.split('#')[0] for key in keys}))
        sections = []
        for product_arch, pkgs in single_package_info.items():
            product, arch = product_arch.split('#')
            if arch not in arch_candidate:
                continue
            header = f"{' ' * 2}{product}({arch})\n"
            body = "\n".join(' ' * 4 + pkg for pkg in pkgs)
            sections.append(header + body + '\n\n')
        str_pkg_name = "".join(sections)
        return get_email_head() + get_email_body(str_product, str_pkg_name) + get_email_tail(str_date, str_arch)
    except Exception as e:
        return "邮件生成失败" + str(e)


def cache_clear(save_path):
    """Delete cached *.bz2 and *.sqlite files directly under save_path."""
    for pattern in ("*.bz2", "*.sqlite"):
        for stale_file in glob.glob(os.path.join(save_path, pattern)):
            os.remove(stale_file)


def get_repo_data_walk(date_str, base_file_path="/var/www/html/pkg", save_path="/tmp/temp4single_package_parse"):
    """Locate *-primary.sqlite.bz2 files under base_file_path/date_str on the
    local filesystem, copy them into save_path, decompress them, and return:

        {"<product>#<arch>": {"file_path_sqlite": <path>}, ...}

    Stale cache files in save_path are removed first.
    """
    cache_clear(save_path)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    glob_pattern = os.path.abspath(
        os.path.join(base_file_path, date_str, "*/*/repodata/*-primary.sqlite.bz2"))
    repo_info_map = {}
    for file_bz2 in glob.glob(glob_pattern):
        local_copy = os.path.join(save_path, os.path.basename(file_bz2))
        shutil.copyfile(file_bz2, local_copy)
        # Path layout: .../<product>/<arch>/repodata/<file> — take the two
        # components above "repodata".
        *_, product, arch, _, _ = os.path.abspath(file_bz2).split(os.path.sep)
        repo_info_map[f"{product}#{arch}"] = {"file_path_sqlite": extract_file_from_bz2(local_copy)}
    return repo_info_map


def main(keyword="*"):
    """Download today's repo metadata over HTTP, cache it via pickle, then
    print the source rpms matching keyword per "<version>#<arch>" tag.
    """
    date_str = time.strftime("%Y%m%d")
    temp_file_path = "/tmp/temp4rpm_single_package_parse"
    map_path = os.path.join(temp_file_path, "dir_map")
    repo_info_map = get_repo_data(base_url="http://172.30.12.115/pkg/", date_str=date_str, save_path=temp_file_path)
    repo_info_map_save(repo_info_map, save_path=map_path)
    repo_info_map = repo_info_map_load(save_path=map_path)
    print("\n" + '-' * 100 + '\n')
    for tag, rpms in search_rpm_version(keyword, repo_info_map).items():
        if len(rpms) > 0:
            print(tag, ":\n\t", '、'.join(rpms))


def main_walk(keyword="*", date_str="20230216"):
    """Print the source rpms matching keyword found in the local on-disk
    repositories for date_str, one line per "<product>#<arch>" tag.
    """
    repo_info_map = get_repo_data_walk(date_str)
    for tag, rpms in search_rpm_version(keyword, repo_info_map).items():
        if len(rpms) > 0:
            print(tag, ":", '、'.join(rpms))

if __name__ == "__main__":
    # Usage: script.py [keyword] [date_str].
    # Forward up to two CLI args; missing args now fall back to main_walk's
    # defaults instead of raising IndexError as the original did.
    main_walk(*sys.argv[1:3])
