import random
import socket
import time
import requests

from bs4 import BeautifulSoup


class MvnRepoCrawler:
    """Scrapes mvnrepository.com for vulnerable versions of a Maven package.

    The Maven coordinate (``group:artifact``) given to the constructor is
    mapped to the site's URL path by replacing ``:`` with ``/``.  Every
    request carries a browser User-Agent plus a session cookie because the
    site rejects anonymous scrapers.
    """

    # Timeout (seconds) for every HTTP request so a stalled connection
    # cannot hang the crawler indefinitely.
    REQUEST_TIMEOUT = 10
    # Maximum attempts for a version-page fetch before giving up.
    MAX_RETRIES = 3

    def __init__(self, pkg):
        # pkg: Maven coordinate, e.g. "org.apache.logging.log4j:log4j-core".
        self.pkg = pkg
        # URL path form of the coordinate: every ':' becomes '/'.
        self.pkg_url = self.pkg.replace(':', '/')
        self.header = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.57",
            'cookie': "_ga=GA1.2.859312568.1666764539; __gads=ID=57f7b3603f8b6759-22a1ee9ba7d80060:T=1669362430:RT=1669362430:S=ALNI_MZoNw7e3_AvcTSAnIt0zTL1tQDzXg; __gpi=UID=00000b833739fca2:T=1669362430:RT=1677242962:S=ALNI_MYosk9UYjPANqdpU6XQISsJnjJHgA; _gid=GA1.2.1180839294.1677383532; _gat=1; __cf_bm=f0jMDqOKXMQ21y3i4K9i0sBZFAQJT7rN8aGbY3GKpoU-1677897282-0-AWeC/E+haYRX3yYxrVm3IV7lEnT9rfiw3KwIlldXMuwI0TbWHanqszO6dEhxcA609FuFeQ8ylQAG25ss4Wnau3I5473r38NB9UXxJ4eQEq2m7ViX8onBceWf18RqhAXn+ZE13SgFK5DCFVV90CMFQRkXQ5KFBzvxdRt9ETHIAOmKijMEYEg2YegSugUxebb6hQ==; MVN_SESSION=eyJhbGciOiJIUzI1NiJ9.eyJkYXRhIjp7InVpZCI6ImNhOWZiMTExLTU0ZjQtMTFlZC1hN2NiLTlmMWFiZDdlMzI1ZSJ9LCJleHAiOjE3MDk0MzMyODcsIm5iZiI6MTY3Nzg5NzI4NywiaWF0IjoxNjc3ODk3Mjg3fQ.5qN_8ymN8JXJ1WxRdyssB5C41XiGWR_xjvKiwapZY_A"
        }

    def crawl_vuln_version(self):
        """Fetch the artifact page and return its vulnerable versions.

        Returns:
            list[dict]: one dict per vulnerable version, with keys
            ``vuln_version`` (str) and ``vuln_num`` (int, CVE count).
            Empty when nothing vulnerable is listed or scraping failed.
        """
        url = "https://mvnrepository.com/artifact/" + self.pkg_url
        print(url)
        # Timeout added: the original call could block forever on a
        # stalled connection (crawl_version_cve already used timeout=10).
        html = requests.get(url, headers=self.header, timeout=self.REQUEST_TIMEOUT)
        # Polite randomized delay (0.5-1.0 s) to avoid hammering the site.
        time.sleep(0.5 * (1 + random.random()))

        # Parse the version table from the artifact page.
        soup = BeautifulSoup(html.text, "html.parser")
        tag_list = soup.select("#snippets > div > div > div > table > tbody > tr > td > a")

        # Collect vulnerable versions and their CVE counts.
        vuln_version_list = []
        for tag in tag_list:
            # Anchors whose first CSS class is 'vuln' mark vulnerable
            # versions; tag.get() guards against anchors with no class
            # attribute, which tag['class'] would raise KeyError on.
            classes = tag.get('class')
            if classes and classes[0] == 'vuln':
                href = tag['href']
                # The version is the last path segment of the href.
                vuln_version = href[href.rindex('/') + 1:]
                # The link text starts with the CVE count, e.g. "3 vulnerabilities".
                vuln_num = tag.get_text().split(" ")[0]
                vuln_version_list.append({
                    "vuln_version": vuln_version,
                    "vuln_num": int(vuln_num),
                })
        if not vuln_version_list:
            print(self.pkg + "不存在依赖版本或爬虫异常")

        return vuln_version_list

    def crawl_version_cve(self, version_vuln_dict):
        """Fetch one version's page and return its CVE identifiers.

        Args:
            version_vuln_dict: a dict with keys ``vuln_version`` and
                ``vuln_num``, as produced by :meth:`crawl_vuln_version`.

        Returns:
            list[str]: up to ``vuln_num`` CVE ids; empty when the page
            could not be fetched or parsed.
        """
        version = version_vuln_dict["vuln_version"]
        num = version_vuln_dict["vuln_num"]

        url = "https://mvnrepository.com/artifact/" + self.pkg_url + '/' + version
        print(url)

        # Bounded retry loop replaces the original unbounded recursion,
        # which could exhaust the call stack on a persistently flaky
        # network.  RequestException covers timeouts and connection
        # errors raised by requests.get.
        html = None
        for _attempt in range(self.MAX_RETRIES):
            try:
                html = requests.get(url, headers=self.header, timeout=self.REQUEST_TIMEOUT)
                break
            except requests.exceptions.RequestException:
                print("出错啦！尝试重新爬取漏洞版本CVE")
                time.sleep(10)
        if html is None:
            # All retries failed: report and give up instead of recursing.
            print("解析" + version + "时爬虫出现异常")
            return []
        # Polite randomized delay (1-2 s) between page fetches.
        time.sleep(1 * (1 + random.random()))

        # Parse the vulnerability table on the version page.
        soup = BeautifulSoup(html.text, "html.parser")
        tag_list = soup.select("body > div > main > div.content > table > tr > td > span > a")

        # Collect at most `num` CVE ids from anchors flagged as 'vuln'.
        cve_list = []
        for tag in tag_list:
            classes = tag.get('class')
            if classes and classes[0] == 'vuln':
                cve_list.append(tag.get_text())
                if len(cve_list) == num:
                    break
        if not cve_list:
            print("解析" + version + "时爬虫出现异常")

        return cve_list
