import pandas as pd
import requests
import openpyxl
from collections import Counter
import zipfile
import os

# Session cookies for sbom-service.osinfra.cn.
# NOTE(review): 'token' is a JWT with an embedded 'exp' claim — it expires and
# must be refreshed manually before each run; presumably obtained from a
# logged-in browser session. Confirm before relying on this script.
cookies = {'HWWAFSESID': '916027372a6ce178c3', 'HWWAFSESTIME': '1718879093594', 'token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJhdWQiOm51bGwsInN1YiI6ImdpdGVlIiwiYXZhdGFyX3VybCI6Imh0dHBzOi8vZ2l0ZWUuY29tL2Fzc2V0cy9ub19wb3J0cmFpdC5wbmciLCJuYW1lIjoiTGJpdWJpdXR3byIsImlkIjoiMTQ0MTQ3NjgiLCJleHAiOjE3MTg4OTM0OTcsImxvZ2luIjoibGJpdWJpdXR3byJ9.eCD9BKMeSy6Y-O0oUNTSLwti0spmzeNU824gOT3rYBw'}
# Browser-like User-Agent sent with every request.
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'}

# Query payload for the package-listing endpoint; 'page' is mutated in the
# paging loop below, 'size' is the page size.
payload = {'productName': 'openEuler-24.03-LTS-everything-x86_64-dvd.iso', 'page': 0, 'size': 30}

# SBOM package-listing endpoint (POST).
url1 = 'https://sbom-service.osinfra.cn/sbom-api/querySbomPackages'

# Full product/ISO name, used in the per-package vulnerability query URL.
version1 = 'openEuler-24.03-LTS-everything-x86_64-dvd.iso'
# Short version tag used only to build output file names.
version2 = '24.03-everything-x86'

# --- Step 1: page through the SBOM package listing and save it to Excel ---

# Initial request just to learn how many result pages exist.
response = requests.post(url1, headers=header, cookies=cookies, data=payload)

if response.status_code != 200:
    print('请求失败，状态码:', response.status_code)
    raise SystemExit(1)

# Parse the JSON once instead of calling response.json() per field.
result = response.json()
total_pages = result["totalPages"]
total_elements = result["totalElements"]

# Collect one DataFrame per page and concatenate once at the end —
# pd.concat inside the loop is quadratic in the number of pages.
page_frames = []
for page in range(total_pages):
    payload["page"] = page
    response = requests.post(url1, headers=header, cookies=cookies, data=payload)
    if response.status_code != 200:
        print(f'请求失败，状态码:{response.status_code}')
        break
    data = response.json()["content"]
    page_frames.append(pd.DataFrame({
        "id": [item["id"] for item in data],
        "affected_software": [item["name"] for item in data],
        # "count" = total of all per-category statistics for the package.
        "count": [sum(item["statistics"].values()) for item in data],
    }))

if page_frames:
    df = pd.concat(page_frames, ignore_index=True)
else:
    # No page succeeded: keep an empty frame with the expected schema so the
    # downstream comparison steps still find the columns they need.
    df = pd.DataFrame(columns=["id", "affected_software", "count"])

# Persist the scraped package list for the later comparison steps.
df.to_excel('sbom_data.xlsx', index=False)

print('----------1----------')

# --- Step 2: compare the majun reference counts against the SBOM export ---

# Reference (majun) counts and the freshly-scraped SBOM counts.
df1 = pd.read_excel('affected_software_count3.0.xlsx')
df2 = pd.read_excel('sbom_data.xlsx')

# Case 1: software present in the majun sheet but absent from the SBOM sheet.
missing_data = df1[~df1['affected_software'].isin(df2['affected_software'])]

# Case 1 also covers software whose SBOM count is 0 — treat those as missing.
zero_count_in_sbom = df2[df2['count'] == 0]
missing_data = pd.concat([missing_data, zero_count_in_sbom])

# BUG FIX: previously this frame was built *before* the zero-count rows were
# appended to missing_data, so those rows never reached miss_data.xlsx even
# though the comment said they should.
new_df1 = pd.DataFrame(missing_data['affected_software']).drop_duplicates()

# Case 2: software present in both sheets but with differing counts.
different_count_data = df1.merge(df2, on='affected_software', suffixes=('_affected_software_count2.0', '_sbom-22.03-LTS'))
different_count_data = different_count_data[different_count_data['count_affected_software_count2.0'] != different_count_data['count_sbom-22.03-LTS']]

new_df2 = pd.DataFrame({'affected_software': different_count_data['affected_software'],
                        'count_majun': different_count_data['count_affected_software_count2.0'],
                        'count_sbom': different_count_data['count_sbom-22.03-LTS']})
# Drop duplicate rows introduced by the many-to-many merge.
new_df2 = new_df2.drop_duplicates()

# Save both cases to their own workbooks.
with pd.ExcelWriter('miss_data.xlsx') as writer:
    new_df1.to_excel(writer, index=False, header=['affected_software'], sheet_name='Missing Data')

with pd.ExcelWriter('diff_data.xlsx') as writer:
    new_df2.to_excel(writer, index=False, sheet_name='Different Count Data')

print('----------2----------')

# --- Step 3: attach SBOM package ids to the differing-count software ---

# Load the differing-count list and the full SBOM package table.
diff_table = pd.read_excel('diff_data.xlsx')
sbom_table = pd.read_excel('sbom_data.xlsx')

# Inner join on the software name keeps only rows present in both sheets.
joined = pd.merge(diff_table, sbom_table, on='affected_software', how='inner')

# Only the name and its SBOM package id are needed for the vulnerability
# queries in the next step.
joined[['affected_software', 'id']].to_excel('merged_data.xlsx', index=False)

print('----------3----------')

# --- Step 4: query the vulnerability list for each package id ---

merged_data = pd.read_excel('merged_data.xlsx')

# Accumulate one dict per (software, vulnerability id) pair; building the
# DataFrame once at the end avoids quadratic pd.concat in the loop.
vul_rows = []

for index, row in merged_data.iterrows():
    package_id = row['id']
    affected_software = row['affected_software']

    # Query parameters are carried in the URL itself.
    # NOTE(review): only the first page (size=30) of vulnerabilities is
    # fetched, matching the original behaviour — confirm that suffices.
    url2 = f'https://sbom-service.osinfra.cn/sbom-api/queryVulnerability/{version1}?page=0&size=30&packageId={package_id}'
    # BUG FIX: the original also sent the same parameters as a GET request
    # *body* (data=...), which is non-standard and redundant — dropped.
    response = requests.get(url2, headers=header, cookies=cookies)

    if response.status_code == 200:
        for item in response.json()['content']:
            # BUG FIX: the original reassigned result_df1 from the never-
            # growing result_df on every item, so only the last vulnerability
            # survived — and the script crashed with a NameError if no
            # request ever succeeded.
            vul_rows.append({'affected_software': affected_software, 'vuId': item['vulId']})

result_df = pd.DataFrame(vul_rows, columns=['affected_software', 'vuId'])

# Save the (software, vulnerability) pairs for the CVE comparison step.
result_df.to_excel('sbom_cve_date.xlsx', index=False)

print('----------4----------')

# --- Step 5: compare majun CVEs against the SBOM vulnerabilities ---

majun_data = pd.read_excel('majun_cve_data3.0.xlsx')
sbom_data = pd.read_excel('sbom_cve_date.xlsx')
missing_data = pd.read_excel('miss_data.xlsx')

# BUG FIX: the original took only the FIRST vuId row per software
# (.values[0]) and split it on commas — but sbom_cve_date.xlsx stores one
# vulnerability id per row, so every other id for that software was ignored
# and its CVEs were wrongly reported as different. Build a
# software -> set-of-ids lookup once instead (also O(1) per row vs a full
# DataFrame filter per iteration).
vul_lookup = sbom_data.groupby('affected_software')['vuId'].apply(
    lambda s: set(s.astype(str))).to_dict()

diff_rows = []
for index, row in majun_data.iterrows():
    id_value = row['id']
    cve_num = row['cve_num']
    affected_software = row['affected_software']

    known_ids = vul_lookup.get(affected_software)
    # Flag the CVE when the software is unknown to the SBOM data, or the
    # SBOM data does not list this particular CVE.
    if known_ids is None or str(cve_num) not in known_ids:
        diff_rows.append({'id': id_value, 'affected_software': affected_software, 'cve_num': cve_num})

result_df2 = pd.DataFrame(diff_rows, columns=['id', 'affected_software', 'cve_num'])

# Missing software: join majun CVEs against the miss list on the name.
merged_data = pd.merge(majun_data, missing_data, on='affected_software', how='inner')

merged_data['id'] = merged_data['id'].astype(str)  # ensure ids are strings
merged_data = merged_data[['id', 'affected_software', 'cve_num']]  # column order

# Save the "missing" CVEs ...
merged_data.to_excel(f'miss-cve-{version2}.xlsx', index=False)

# ... and the "different" CVEs.
result_df2.to_excel(f'diff-cve-{version2}.xlsx', index=False)


print('----------5----------')

# --- Step 6: bundle the result sheets (plus the readme) into a ZIP ---

archive_name = f'openEuler-{version2}.zip'
bundle = [f'miss-cve-{version2}.xlsx', f'diff-cve-{version2}.xlsx', '说明文本.txt']

# Write every file into a fresh archive.
with zipfile.ZipFile(archive_name, 'w') as archive:
    for path in bundle:
        archive.write(path)

# Sanity check: confirm the archive landed on disk.
if os.path.exists(archive_name):
    print(f'ZIP文件 {archive_name} 创建成功！')
else:
    print('创建ZIP文件失败。')

print('----------6----------')

# --- Step 7: delete intermediate .xlsx files, keeping the two inputs ---

# The two input workbooks that must survive the cleanup.
target_files = ['affected_software_count3.0.xlsx', 'majun_cve_data3.0.xlsx']

# Working directory to sweep (replace with the target folder's path).
folder_path = "C:/Users/longxianliang/PycharmProjects/Sbom_cve_data_validation"

if not os.path.exists(folder_path):
    print(f"路径 '{folder_path}' 不存在")
else:
    for filename in os.listdir(folder_path):
        # Skip everything that is not an .xlsx, and the protected inputs.
        if not filename.endswith(".xlsx") or filename in target_files:
            continue
        file_path = os.path.join(folder_path, filename)
        try:
            os.remove(file_path)
            print(f"已删除文件: {file_path}")
        except OSError as e:
            print(f"删除文件失败: {file_path} - {e}")

