import requests
from bs4 import BeautifulSoup
from my_fake_useragent import UserAgent
from lxml import etree
from urllib.request import quote,unquote
# Shared scraping configuration.
# NOTE(review): my_fake_useragent's UserAgent().random presumably yields a
# random browser User-Agent string — confirm against that package's docs.
ua = UserAgent()

# Request headers sent with every fetch; a browser-like User-Agent reduces
# the chance of being served a bot-blocking page.
headers = {'User-Agent':ua.random}

# Base URL of the WMO Country Profile Database; a country path is appended
# by the (currently commented-out) code in the __main__ block.
url_path = 'https://www.wmo.int/cpdb/'
def scrapy(url, timeout=10):
    """Fetch *url* and extract cell texts from the page's data table.

    Looks up ``//div[@class="table"]/table/tbody`` nodes and pulls the text
    of ``tr[2]/td[13]`` from each one, printing intermediate results as the
    original script did.

    Args:
        url: Page to fetch (uses the module-level ``headers``).
        timeout: Seconds before the HTTP request is aborted; without it the
            original call could hang indefinitely.

    Returns:
        list[str]: All extracted text fragments (empty if nothing matched
        or the response body could not be parsed).

    Raises:
        requests.HTTPError: If the server answers with a 4xx/5xx status.
        requests.Timeout: If the request exceeds *timeout* seconds.
    """
    html = requests.get(url, headers=headers, timeout=timeout)
    # Fail fast on error pages instead of silently parsing their HTML.
    html.raise_for_status()
    selector = etree.HTML(html.text)
    if selector is None:
        # etree.HTML returns None for an empty/unparseable body; the old
        # code would have crashed on the .xpath call below.
        return []
    # NOTE(review): raw HTML often lacks an explicit <tbody> (browsers insert
    # it); if this matches nothing, try dropping '/tbody' — verify on a real
    # response.
    infos = selector.xpath('//div[@class="table"]/table/tbody')
    print(infos)
    results = []
    for info in infos:
        tmp = info.xpath('tr[2]/td[13]/text()')
        print(tmp)
        results.extend(tmp)
    return results

if __name__ == '__main__':
    # Scraper entry point, currently disabled in favour of the
    # percent-encoding experiment below.
    # country = input("国家：")
    # url = url_path + country
    # scrapy(url)

    # Demonstrate URL percent-encoding of a non-ASCII name, plus
    # double-encoding (escaping the '%' signs themselves as '%25').
    person = '刘武'
    encoded = quote(person)
    double_encoded = encoded.replace('%', '%25')
    print(encoded)
    print(unquote(encoded))   # round-trips back to the original name
    print(double_encoded)

