import requests
from bs4 import BeautifulSoup
from pypinyin import pinyin, Style

def get_first_letters(name):
    """Return a lowercase key for *name*.

    Chinese characters are reduced to the first letter of their pinyin
    (Style.FIRST_LETTER); all other characters (English letters, digits,
    punctuation) are passed through by pypinyin unchanged, then lowercased.

    NOTE(review): the original implementation appended a second "English
    part" built with ``not c.isalpha()`` — but ``str.isalpha()`` is True
    for Chinese characters, so that filter collected digits/punctuation
    that pypinyin had already emitted, duplicating them (e.g. "张三3"
    became "zs33"). Deriving the whole key from the pinyin output fixes
    the duplication and honors the "lowercase the English part" intent.
    """
    pinyin_list = pinyin(name, style=Style.FIRST_LETTER)
    return ''.join(p[0] for p in pinyin_list).lower()

# HTTP fetching is currently disabled; the HTML is read from a local file.
url = '你的网页URL'
# response = requests.get(url)

# Read the HTML document from disk.
with open('table.html', 'r', encoding='utf-8') as file:
    html_content = file.read()

# Parse the HTML with Beautiful Soup.
soup = BeautifulSoup(html_content, 'html.parser')

# Walk every <table> in the document and print one line per row:
# a pinyin-derived key (from the second cell) followed by the text of the
# first ten cells, separated by four spaces.
for table in soup.find_all('table'):
    for row in table.find_all('tr'):
        # Both header (<th>) and data (<td>) cells are included.
        cells = row.find_all(['th', 'td'])

        # The key is built from the second cell; the original code indexed
        # cells[1] unconditionally and raised IndexError on rows with fewer
        # than two cells — skip such malformed rows instead.
        if len(cells) < 2:
            continue

        first_letters = get_first_letters(cells[1].get_text().strip())

        # Collect the (stripped) text of the first 10 cells, keeping only
        # cells whose raw text is non-empty — same filter as the original.
        texts = [c.get_text().strip() for c in cells[:10] if c.get_text()]

        # Prefix the key only when at least one cell contributed text;
        # otherwise print an empty line, matching the original behavior.
        if texts:
            print(first_letters + "    " + "    ".join(texts))
        else:
            print("")

# This prints the text content of every row of every table in the file.