from urllib.parse import parse_qs, urlencode,  urlsplit

import requests
from bs4 import BeautifulSoup

class urlTools:
    """Helpers for manipulating URL query strings and fetching parsed pages."""

    @staticmethod
    def setParm(url, parms):
        """
        Set query parameters on *url*: existing keys are replaced, missing
        keys are appended.

        :param url: the URL whose query string should be updated
        :param parms: either a dict like {"page": "2"} or a list of such
                      dicts, e.g. [{"page": "2"}, {"size": "10"}]
        :return: the rebuilt URL with the updated query string
        :raises TypeError: if *parms* is neither a dict nor a list
        """
        parsed = urlsplit(url)
        query_dict = parse_qs(parsed.query)

        def _apply(key, val):
            # Replace only the first value of an existing key (preserving any
            # additional values parse_qs collected); otherwise add a new entry.
            if key in query_dict:
                query_dict[key][0] = val
            else:
                query_dict[key] = [val]

        if isinstance(parms, dict):
            for key, val in parms.items():
                _apply(key, val)
        elif isinstance(parms, list):
            # BUGFIX: the original `else` here was indented onto the inner
            # `for` loop (a for/else clause), so it ran once per dict with the
            # last key/val only — new keys other than the last were silently
            # dropped. Routing both branches through _apply restores the
            # intended per-key replace-or-add behavior.
            for parm_dict in parms:
                for key, val in parm_dict.items():
                    _apply(key, val)
        else:
            # Previously this only printed an error and fell through,
            # returning the URL unchanged; fail loudly instead.
            raise TypeError("parms must be a dict or a list of dicts")

        query_new = urlencode(query_dict, doseq=True)
        return parsed._replace(query=query_new).geturl()

    @staticmethod
    def calPageCount(pageTootal, pageSize):
        """
        Return the number of pages needed to hold *pageTootal* items at
        *pageSize* items per page (ceiling division).

        :param pageTootal: total number of items (name kept for
                           backward compatibility with keyword callers)
        :param pageSize: items per page; must be non-zero
        :return: the page count
        """
        return ((pageTootal - 1) // pageSize) + 1

    @staticmethod
    def getPageSoup(url, params, timeout=30):
        """
        Fetch *url* and return its HTML parsed into a BeautifulSoup tree.

        :param url: the page URL to fetch
        :param params: query parameters passed through to requests.get
        :param timeout: request timeout in seconds (new, backward-compatible;
                        the original call had none and could hang forever)
        :return: a BeautifulSoup object built with the html.parser backend
        """
        # Some sites reject requests whose User-Agent looks like a bot,
        # so present a regular browser identity.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
        }
        response = requests.get(url=url, params=params, headers=headers,
                                timeout=timeout)
        # Force UTF-8 decoding to avoid mojibake with Chinese content.
        response.encoding = 'utf-8'
        # Parse the HTML into a tree using the stdlib html.parser backend.
        return BeautifulSoup(response.text, "html.parser")

