import os
from bs4 import BeautifulSoup

"""
下载CWE数据集的爬虫.
"""
import time
import requests
from urllib.parse import urlencode

# Search endpoint of the NIST SARD test-case database.
base_url = 'https://samate.nist.gov/SARD/test-cases/search?'

# Host prefix prepended to the relative links scraped from result pages.
download_base_url = 'https://samate.nist.gov'

# Query-string template for the search endpoint; flaw[]/language[]/page
# are filled in per request by download_dataset. limit=100 asks for 100
# test cases per result page.
params = {
    "flaw[]": None,
    "language[]": None,
    "page": None,
    "limit": 100
}

def get_dataset_names(path):
    """Read dataset names (e.g. "CWE-191"), one per line, from a text file.

    Args:
        path: Path to a UTF-8 text file with one dataset name per line.

    Returns:
        List of the non-empty lines, stripped of surrounding whitespace.
    """
    with open(path, "r", encoding="utf-8") as f:
        # str.strip() already removes the trailing newline, so the old
        # extra .replace("\n", "") was redundant; iterating the file
        # directly avoids materializing it with readlines().
        return [line.strip() for line in f if line.strip()]

# Create the per-dataset folder layout under the raw-data directory.
def mkdir(base_dir="./data_raw", dataset_names=None):
    """Create base_dir plus one sub-directory per dataset name.

    Args:
        base_dir: Root directory that holds one folder per dataset.
        dataset_names: Iterable of dataset names (e.g. ["CWE-191"]);
            None is treated as "nothing to create" instead of raising
            TypeError as the old code did.
    """
    # exist_ok avoids the check-then-create race of the old
    # os.path.exists + os.mkdir pair, and makedirs also creates any
    # missing parent directories.
    os.makedirs(base_dir, exist_ok=True)
    for name in dataset_names or []:
        os.makedirs(os.path.join(base_dir, name), exist_ok=True)


def download_dataset(cwe_id, language, params, start_page = 1, last_page=100):
    """Download every test case of one CWE/language from SARD.

    Builds the search-result page URLs, scrapes the test-case detail
    URLs from each page, then downloads each case's zip into
    ./data_raw/<cwe_id>/.

    Args:
        cwe_id: Dataset name such as "CWE-191" (also the target folder).
        language: SARD language filter, e.g. "java".
        params: Query-string template dict (flaw[]/language[]/page/limit).
        start_page: First result page to fetch (1-based).
        last_page: Last result page to fetch, inclusive.
    """
    # Work on a copy so the caller's (module-level, shared) dict is not
    # mutated as a side effect of building the URLs.
    query = dict(params)
    # Normalize e.g. "CWE-089" -> "CWE-89"; invariant across pages, so
    # hoisted out of the loop (the old code reassigned it every pass).
    query['flaw[]'] = 'CWE-' + str(int(cwe_id.split('-')[-1]))
    query['language[]'] = language

    all_page_urls = []
    for page_no in range(start_page, last_page+1):
        query['page'] = page_no
        all_page_urls.append(base_url + urlencode(query))

    print('一共有%d页，分别是：' % len(all_page_urls))
    print(all_page_urls)

    # Collect the per-test-case detail URLs from every result page.
    detail_urls = []
    for page_url in all_page_urls:
        print('正在从 %s 获取具体的testcase url: ' % page_url)
        parse_page_detail(page_url, detail_urls)
    print('一共有%d个测试样例，分别是:' % len(detail_urls))
    print(detail_urls)

    # Download each test case's zip into the matching dataset folder.
    for i, detail_url in enumerate(detail_urls):
        time.sleep(0.2)  # be polite to the server between downloads
        save_path = download_case_zip(detail_url, cwe_id)
        print("保存第%d个文件成功：%s" % (i+1, save_path))

def download_case_zip(url, cwe_id):
    """Download the zip attached to one SARD test-case detail page.

    Args:
        url: Absolute URL of the test-case detail page.
        cwe_id: Dataset folder name under ./data_raw (e.g. "CWE-191");
            the folder must already exist (see mkdir()).

    Returns:
        The local save path on success, or None when the page has no
        download link.
    """
    response = requests.get(url, timeout=30)  # never hang forever on the server
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'lxml')
    download_element = soup.find('a', class_='button download-version')
    # Guard clause instead of nesting the whole body in the old `if`;
    # the implicit-None fall-through is now an explicit return.
    if download_element is None:
        return None
    file_url = download_element['href']
    # NOTE(review): assumes href is an absolute URL — confirm against a
    # live page; a relative href would need download_base_url prepended.
    file_name = file_url.split('/')[-1]
    save_path = './data_raw/%s/%s' % (cwe_id, file_name)
    case_response = requests.get(file_url, timeout=60)
    # Without this, an HTTP error page would be silently saved as the zip.
    case_response.raise_for_status()
    with open(save_path, 'wb') as f:
        f.write(case_response.content)
    return save_path

def parse_page_detail(url, detail_urls):
    """Collect test-case detail URLs from one search-result page.

    Args:
        url: Absolute URL of a SARD search-result page.
        detail_urls: List that absolute detail URLs are appended to
            in place (the function returns None).
    """
    response = requests.get(url, timeout=30)  # never hang forever on the server
    soup = BeautifulSoup(response.text, 'lxml')
    # Each search hit is rendered as a card <li>; its first <a> holds the
    # relative link to the test-case detail page.
    for test_case in soup.find_all('li', class_='card test-case-card animated'):
        link = test_case.find('a')['href']
        detail_urls.append(download_base_url + link)





if __name__ == '__main__':
    # List of datasets to fetch, one name per line.
    path = "./dataset.txt"
    dataset_names = get_dataset_names(path)
    # NOTE(review): the list just read from dataset.txt is immediately
    # overwritten by this hard-coded single entry, making the file read
    # above dead code — confirm whether this debug override should stay.
    dataset_names = ['CWE-191']
    mkdir(base_dir="./data_raw", dataset_names=dataset_names)
    print(dataset_names)
    for cwe_id in dataset_names:
        download_dataset(cwe_id = cwe_id, language='java', params = params, start_page=1, last_page=42)


