# GF_PY312_CLASS_Crawler_by_BS4.py
# Create by GF 2025-03-01 21:12

import bs4
import requests
import time

class Crawler(object):
    """Small web crawler built on requests + BeautifulSoup (bs4).

    Usage: set ``self.URL`` (and, for the ollama.com collectors, the
    ``Cap_List_by_Ele_S`` / ``Cap_List_by_Ele_E`` marker elements) before
    calling a collector method.  ``self.Headers`` defaults to the first
    entry of ``self.Some_Headers``.
    """

    def __init__(self):

        # Start / end marker elements consumed by
        # Capture_List_by_Start_Element_to_End_Element() in the collectors.
        self.Cap_List_by_Ele_S = None
        self.Cap_List_by_Ele_E = None

        # Target URL used by Requests_2_x_Get_HTTP().
        self.URL = None

        # A small pool of ready-made HTTP header sets.
        self.Some_Headers = [
            {# Default
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
            },

            {# mirrors.tuna.tsinghua.edu.cn 20250325
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
                "Accept-Encoding": "gzip, deflate, br, zstd",  # NOTE: requests transparently decodes compressed bodies
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
                "Referer": "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/pypi/pip/",
                "Sec-Fetch-Dest": "document",
                "Sec-Fetch-Mode": "navigate",
                "Sec-Fetch-Site": "same-origin",
                "Upgrade-Insecure-Requests": "1"
            }
        ]

        # BUGFIX: was `self.Hedaers = Some_Headers[0]` -- `Some_Headers` is
        # unqualified (NameError at construction time) and the attribute name
        # is a typo: every other method reads `self.Headers`.
        self.Headers = self.Some_Headers[0]

        # Seconds before requests.get() gives up.  BUGFIX: the original calls
        # had no timeout, so a stalled server would hang the crawler forever.
        self.Timeout = 30

    def Capture_List_by_Start_Element_to_End_Element(self, Src_List, Start_Element, End_Element):
        """Capture the sub-list from Start_Element to End_Element (inclusive).

        The index of the FIRST occurrence of each marker is used; a marker
        that is absent falls back to index 0 (preserving the original
        behavior).  If the end index precedes the start index an empty list
        is returned.

        BUGFIX: the original indexed Src_List[0] unconditionally in its copy
        loop and raised IndexError on an empty Src_List; an empty input now
        returns [].

        Example:
        >>> Capture_List_by_Start_Element_to_End_Element(
        ...     ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], 'b', 'e')
        ['b', 'c', 'd', 'e']
        """

        try:
            Cap_Bgn_Idx = Src_List.index(Start_Element)
        except ValueError:
            Cap_Bgn_Idx = 0  # marker not found -> original fallback

        try:
            Cap_End_Idx = Src_List.index(End_Element)
        except ValueError:
            Cap_End_Idx = 0  # marker not found -> original fallback

        # Inclusive slice; yields [] when Cap_End_Idx < Cap_Bgn_Idx or the
        # source list is empty.
        return Src_List[Cap_Bgn_Idx:Cap_End_Idx + 1]

    def Requests_2_x_Get_HTTP(self):
        """Send an HTTP GET to self.URL with self.Headers.

        Returns:
            requests.Response on HTTP 200, otherwise prints the status code
            and returns None.
        """

        # Send the HTTP request (bounded by self.Timeout).
        Response = requests.get(self.URL, headers=self.Headers, timeout=self.Timeout)

        # Check whether the request succeeded.
        if Response.status_code == 200:
            return Response

        # BUGFIX: the message referenced an undefined lowercase `response`,
        # which raised NameError on every non-200 reply.
        print(f"Failed to retrieve the webpage. Status code: {Response.status_code}")
        return None

    def Requests_2_x_Download_File(self, URL, Save_Path):
        """Download URL to the local path Save_Path using self.Headers.

        Returns:
            True on HTTP 200 (file written), False otherwise.

        Example:
        >>> url = "https://example.com/file.pdf"
        >>> Requests_2_x_Download_File(url, "file.pdf")
        True
        """

        response = requests.get(URL, headers=self.Headers, stream=True, timeout=self.Timeout)

        if response.status_code != 200:
            return False

        # BUGFIX: the original passed stream=True but then read
        # response.content, buffering the whole body in memory.  Write the
        # body chunk-by-chunk instead; the bytes on disk are identical.
        with open(Save_Path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=65536):
                f.write(chunk)
        return True

    def ollama_com_Collect_Metadata_20250301(self):
        """Scrape the "Metadata" section of an ollama.com model page.

        Expects self.URL, self.Cap_List_by_Ele_S and self.Cap_List_by_Ele_E
        to be set beforehand.

        Returns:
            {"col_1": [metadata keys], "col_3": [metadata values]} or None
            when the page could not be fetched.

        Web Page Structure:

        <ul>
        <div>Metadata</div>
        <li><div>general.architecture</div>             <div>nomic-bert</div></li>
        <li><div>general.file_type</div>                <div>1</div></li>
        <li><div>general.name</div>                     <div>nomic-embed-text-v1.5</div></li>
        <li><div>nomic-bert.attention.causal</div>      <div>false</div></li>
        <li><div>nomic-bert.attention.head_count</div>  <div>12</div></li>
        ......
        </ul>
        """

        Response = self.Requests_2_x_Get_HTTP()
        if Response is None:
            return None

        # Parse the HTML content.
        Soup = bs4.BeautifulSoup(Response.text, 'html.parser')

        # Column 1: metadata key names; trim the list to the configured
        # start/end marker elements.
        html_col_1 = Soup.select("li .text-neutral-600")
        text_col_1 = [tag.text for tag in html_col_1]
        text_col_1 = self.Capture_List_by_Start_Element_to_End_Element(
                        text_col_1,
                        self.Cap_List_by_Ele_S,
                        self.Cap_List_by_Ele_E
                     )

        # Column 3: metadata values; strip the page's "\n    " padding.
        html_col_3 = Soup.select("li .font-mono.font-medium")
        text_col_3 = [tag.text.replace("\n    ", '') for tag in html_col_3]

        return {
            "col_1": text_col_1,
            "col_3": text_col_3
        }

    def ollama_com_Collect_Tensor_20250301(self):
        """Scrape the "Tensor" section of an ollama.com model page.

        Expects self.URL, self.Cap_List_by_Ele_S and self.Cap_List_by_Ele_E
        to be set beforehand.

        Returns:
            {"col_1": [names], "col_2": [types], "col_3": [shapes]} or None
            when the page could not be fetched.

        Web Page Structure:

        <ul>
        ......
        <div>Tensor</div>
        <div>Name</div>                         <li><div>Type</div></li>  <li><div>Shape</div></li>
        <li><div>token_embd.weight              <li><div>F16</div></li>   <li><div>[768, 30522]</div></li>
        <li><div>blk.0
        <li><div>blk.0.attn_output.weight       <li><div>F16</div></li>   <li><div>[768, 768]</div></li>
        <li><div>blk.0.attn_output_norm.bias    <li><div>F32</div></li>   <li><div>[768]</div></li>
        <li><div>blk.0.attn_output_norm.weight  <li><div>F32</div></li>   <li><div>[768]</div></li>
        <li><div>blk.0.attn_qkv.weight          <li><div>F16</div></li>   <li><div>[768, 2304]</div></li>
        ......
        </ul>
        """

        Response = self.Requests_2_x_Get_HTTP()
        if Response is None:
            return None

        # Parse the HTML content.
        Soup = bs4.BeautifulSoup(Response.text, 'html.parser')

        # Column 1: tensor names; trim to the configured start/end markers.
        html_col_1 = Soup.select("li .text-neutral-600")
        text_col_1 = [tag.text for tag in html_col_1]
        text_col_1 = self.Capture_List_by_Start_Element_to_End_Element(
                        text_col_1,
                        self.Cap_List_by_Ele_S,
                        self.Cap_List_by_Ele_E
                     )

        # Column 2: tensor dtypes (e.g. F16 / F32).
        html_col_2 = Soup.select("li .col-span-1.font-mono")
        text_col_2 = [tag.text for tag in html_col_2]

        # Column 3: tensor shapes (e.g. [768, 768]).
        html_col_3 = Soup.select("li .col-span-3.font-mono")
        text_col_3 = [tag.text for tag in html_col_3]

        return {
            "col_1": text_col_1,
            "col_2": text_col_2,
            "col_3": text_col_3
        }

    def mirrors_tuna_tsinghua_edu_cn_Collect_PyPI_Href_20250324(self, Package, Version):
        """Collect package-file hrefs from the TUNA PyPI simple index.

        Fetches the simple-index page for `Package` and returns the hrefs
        whose text contains `Version` (the page itself already lists only
        this package, so no extra package-name filter is applied).

        Returns:
            list[str] of matching hrefs, or None when the page could not be
            fetched.

        Web Page Structure:

        <body>
            <h1>Links for Flask</h1>
            <a href="../../packages/6e/49/Flask-0.1.tar.gz#sha256=9da8">Flask-0.1.tar.gz</a><br/>
            <a href="../../packages/db/9c/Flask-0.10.1.tar.gz#sha256=4c83">Flask-0.10.1.tar.gz</a><br/>
            <a href="../../packages/f3/46/Flask-0.10.tar.gz#sha256=84b3">Flask-0.10.tar.gz</a><br/>
            <a href="../../packages/ac/0b/Flask-0.11-py2.py3-none-any.whl#sha256=6b22">Flask-0.11-py2.py3-none-any.whl</a><br/>
            <a href="../../packages/63/2b/Flask-0.11.1-py2.py3-none-any.whl#sha256=a4f9">Flask-0.11.1-py2.py3-none-any.whl</a><br/>
            ......
        </body>
        """

        self.URL = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" + '/' + Package

        Response = self.Requests_2_x_Get_HTTP()

        # Be polite to the mirror: fixed pause between requests.
        time.sleep(3)

        if Response is None:
            return None

        # Parse the HTML content.
        Soup = bs4.BeautifulSoup(Response.text, 'html.parser')

        # find_all returns a bs4.element.ResultSet (iterable) of
        # bs4.element.Tag; .get('href') yields str, or None when the tag has
        # no href attribute.
        a_tag_list = Soup.find_all('a')
        href_text_list = [a.get('href') for a in a_tag_list]

        # Keep the hrefs that mention the requested version.  BUGFIX: guard
        # against href-less <a> tags (x is None) which raised TypeError on
        # `Version in x`.
        return [x for x in href_text_list if x and Version in x]

# EOF Signed by GF.
